diff --git a/tests/certification/bindings/kafka/components-retry/consumer2/kafka.yaml b/tests/certification/bindings/kafka/components-retry/consumer2/kafka.yaml
new file mode 100644
index 000000000..99b6ca53a
--- /dev/null
+++ b/tests/certification/bindings/kafka/components-retry/consumer2/kafka.yaml
@@ -0,0 +1,22 @@
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+  name: messagebus
+spec:
+  type: bindings.kafka
+  version: v1
+  metadata:
+  - name: topics # Input binding topic
+    value: neworder
+  - name: publishTopic # Output binding topic
+    value: neworder
+  - name: brokers
+    value: localhost:19092,localhost:29092,localhost:39092
+  - name: consumerGroup
+    value: kafkaCertification2
+  - name: authType
+    value: "none"
+  - name: initialOffset
+    value: oldest
+  - name: backOffDuration
+    value: 50ms
diff --git a/tests/certification/bindings/kafka/components-retry/mtls-consumer/kafka.yaml b/tests/certification/bindings/kafka/components-retry/mtls-consumer/kafka.yaml
deleted file mode 100644
index e4711d16b..000000000
--- a/tests/certification/bindings/kafka/components-retry/mtls-consumer/kafka.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
-  name: messagebus
-spec:
-  type: bindings.kafka
-  version: v1
-  metadata:
-  - name: topics # Input binding topic
-    value: neworder
-  - name: publishTopic # Outpub binding topic
-    value: neworder
-  - name: consumeRetryEnabled # enable consumer retry
-    value: true
-  - name: brokers
-    value: localhost:19094,localhost:29094,localhost:39094
-  - name: consumerGroup
-    value: kafkaCertification2
-  - name: initialOffset
-    value: oldest
-  - name: backOffDuration
-    value: 50ms
-  - name: authType
-    value: mtls
-  - name: caCert
-    value: |
-      -----BEGIN CERTIFICATE-----
-      MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
-      BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
-      HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
-      IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
-      ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
-      I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
-      NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
-      jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
-      bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
-      b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
-      BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
-      lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
-      A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
-      z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
-      nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
-      rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
-      MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-      -----END CERTIFICATE-----
-  - name: clientCert
-    value: |
-      -----BEGIN CERTIFICATE-----
-      MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
-      BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
-      HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
-      IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-      AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
-      cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
-      E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
-      
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL - 27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7 - 1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg - MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G - A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+ - PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth - LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B - AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y - hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x - xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2 - G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy - TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx - OrycW7rZKfrg2eZml8FnYlzO64u41oC47A== - -----END CERTIFICATE----- - - name: clientKey - value: | - -----BEGIN RSA PRIVATE KEY----- - MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt - 3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c - eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp - GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3 - 2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L - ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV - cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU - cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG - jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM - FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO - Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4 - tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ - jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3 - P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf - hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis - yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD - Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF - GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd - BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u - 28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+ - qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi - iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq - sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv - vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR - edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk= - -----END RSA PRIVATE KEY----- diff --git a/tests/certification/bindings/kafka/components-retry/oauth-consumer/kafka.yaml b/tests/certification/bindings/kafka/components-retry/oauth-consumer/kafka.yaml deleted file mode 100644 index 96d6ce3ae..000000000 --- a/tests/certification/bindings/kafka/components-retry/oauth-consumer/kafka.yaml +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: dapr.io/v1alpha1 -kind: Component -metadata: - name: messagebus -spec: - type: bindings.kafka - version: v1 - metadata: - - name: topics # Input binding topic - value: neworder - - name: publishTopic # Outpub binding topic - value: neworder - - name: consumeRetryEnabled # enable consumer retry - value: true - - name: brokers - value: localhost:19093,localhost:29093,localhost:39093 - - name: consumerGroup - value: kafkaCertification2 - - name: authType - value: "oidc" - - name: initialOffset - value: oldest - - name: backOffDuration - value: 50ms - - name: 
oidcTokenEndpoint
-    value: https://localhost:4443/oauth2/token
-  - name: oidcClientID
-    value: "dapr"
-  - name: oidcClientSecret
-    value: "dapr-test"
-  - name: oidcScopes
-    value: openid,kafka
-  - name: caCert
-    value: |
-      -----BEGIN CERTIFICATE-----
-      MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
-      BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
-      HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
-      IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
-      ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
-      I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
-      NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
-      jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
-      bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
-      b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
-      BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
-      lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
-      A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
-      z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
-      nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
-      rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
-      MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-      -----END CERTIFICATE-----
diff --git a/tests/certification/bindings/kafka/components/consumer2/kafka.yaml b/tests/certification/bindings/kafka/components/consumer2/kafka.yaml
new file mode 100644
index 000000000..99b6ca53a
--- /dev/null
+++ b/tests/certification/bindings/kafka/components/consumer2/kafka.yaml
@@ -0,0 +1,22 @@
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+  name: messagebus
+spec:
+  type: bindings.kafka
+  version: v1
+  metadata:
+  - name: topics # Input binding topic
+    value: neworder
+  - name: publishTopic # Output binding topic
+    value: neworder
+  - name: brokers
+    value: localhost:19092,localhost:29092,localhost:39092
+  - name: consumerGroup
+    value: kafkaCertification2
+  - name: authType
+    value: "none"
+  - name: initialOffset
+    value: oldest
+  - name: backOffDuration
+    value: 50ms
diff --git a/tests/certification/bindings/kafka/components/mtls-consumer/kafka.yaml b/tests/certification/bindings/kafka/components/mtls-consumer/kafka.yaml
deleted file mode 100644
index a4de50c37..000000000
--- a/tests/certification/bindings/kafka/components/mtls-consumer/kafka.yaml
+++ /dev/null
@@ -1,96 +0,0 @@
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
-  name: messagebus
-spec:
-  type: bindings.kafka
-  version: v1
-  metadata:
-  - name: topics # Input binding topic
-    value: neworder
-  - name: publishTopic # Outpub binding topic
-    value: neworder
-  - name: brokers
-    value: localhost:19094,localhost:29094,localhost:39094
-  - name: consumerGroup
-    value: kafkaCertification2
-  - name: initialOffset
-    value: oldest
-  - name: backOffDuration
-    value: 50ms
-  - name: authType
-    value: mtls
-  - name: caCert
-    value: |
-      -----BEGIN CERTIFICATE-----
-      MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
-      BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
-      HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
-      IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
-      ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
-      I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
-      
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK - jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF - bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm - b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/ - BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg - lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq - A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec - z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w - nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug - rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc - MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST - -----END CERTIFICATE----- - - name: clientCert - value: | - -----BEGIN CERTIFICATE----- - MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL - BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew - HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy - IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK - AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo - cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0 - E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp - MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL - 27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7 - 1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg - MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G - A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+ - PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth - LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B - AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y - hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x - xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2 - G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy - TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx - OrycW7rZKfrg2eZml8FnYlzO64u41oC47A== - -----END CERTIFICATE----- - - name: clientKey - value: | - -----BEGIN RSA PRIVATE KEY----- - MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt - 3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c - eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp - GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3 - 2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L - ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV - cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU - cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG - jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM - FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO - Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4 - tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ - jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3 - P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf - hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis - yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD - Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF - GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd - BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u - 
28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+ - qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi - iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq - sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv - vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR - edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk= - -----END RSA PRIVATE KEY----- diff --git a/tests/certification/bindings/kafka/components/oauth-consumer/kafka.yaml b/tests/certification/bindings/kafka/components/oauth-consumer/kafka.yaml deleted file mode 100644 index 660dcfb7a..000000000 --- a/tests/certification/bindings/kafka/components/oauth-consumer/kafka.yaml +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: dapr.io/v1alpha1 -kind: Component -metadata: - name: messagebus -spec: - type: bindings.kafka - version: v1 - metadata: - - name: topics # Input binding topic - value: neworder - - name: publishTopic # Outpub binding topic - value: neworder - - name: brokers - value: localhost:19093,localhost:29093,localhost:39093 - - name: consumerGroup - value: kafkaCertification2 - - name: authType - value: "oidc" - - name: initialOffset - value: oldest - - name: backOffDuration - value: 50ms - - name: oidcTokenEndpoint - value: https://localhost:4443/oauth2/token - - name: oidcClientID - value: "dapr" - - name: oidcClientSecret - value: "dapr-test" - - name: oidcScopes - value: openid,kafka - - name: caCert - value: | - -----BEGIN CERTIFICATE----- - MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL - BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew - HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy - IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD - ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk - I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL - NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK - jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF - bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm - b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/ - BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg - lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq - A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec - z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w - nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug - rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc - MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST - -----END CERTIFICATE----- diff --git a/tests/certification/bindings/kafka/docker-compose.yml b/tests/certification/bindings/kafka/docker-compose.yml index 1a8a67ca5..4e4f62a0a 100644 --- a/tests/certification/bindings/kafka/docker-compose.yml +++ b/tests/certification/bindings/kafka/docker-compose.yml @@ -1,191 +1,68 @@ -version: "3.7" -services: - zookeeper: - image: confluentinc/cp-zookeeper:5.4.0 - hostname: zookeeper - container_name: zookeeper - ports: - - "2181:2181" - environment: - ZOOKEEPER_CLIENT_PORT: 2181 - ZOOKEEPER_TICK_TIME: 2000 - kafka1: - image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0 - hostname: kafka-1 - container_name: kafka-1 - read_only: false - entrypoint: - /bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'" - user: root - depends_on: - - zookeeper - ports: - - "19094:19094" - - "19093:19093" - - 
"19092:19092" - volumes: - - type: bind - source: ./strimzi-ca-certs - target: /opt/kafka/cluster-ca-certs - read_only: true - - type: bind - source: ./strimzi-broker-certs - target: /opt/kafka/broker-certs - read_only: true - - type: bind - source: ./strimzi-client-ca - target: /opt/kafka/client-ca-certs - read_only: true - - type: bind - source: ./strimzi-listener-certs - target: /opt/kafka/certificates/custom-mtls-9094-certs - read_only: true - - type: bind - source: ./strimzi-listener-certs - target: /opt/kafka/certificates/custom-oauth-9093-certs - read_only: true - - type: bind - source: ./strimzi-listener-certs - target: /opt/kafka/certificates/oauth-oauth-9093-certs - read_only: true - - type: bind - source: ./strimzi-kafka1-config - target: /opt/kafka/custom-config - read_only: true - - type: volume - source: kafka1-data - target: /var/lib/kafka/data - environment: - KAFKA_METRICS_ENABLED: "false" - STRIMZI_KAFKA_GC_LOG_ENABLED: "false" - KAFKA_HEAP_OPTS: "-Xms128M" - - kafka2: - image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0 - hostname: kafka-2 - container_name: kafka-2 - read_only: false - entrypoint: - /bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'" - user: root - depends_on: - - zookeeper - ports: - - "29094:29094" - - "29093:29093" - - "29092:29092" - volumes: - - type: bind - source: ./strimzi-ca-certs - target: /opt/kafka/cluster-ca-certs - read_only: true - - type: bind - source: ./strimzi-broker-certs - target: /opt/kafka/broker-certs - read_only: true - - type: bind - source: ./strimzi-client-ca - target: /opt/kafka/client-ca-certs - read_only: true - - type: bind - source: ./strimzi-listener-certs - target: /opt/kafka/certificates/custom-mtls-9094-certs - read_only: true - - type: bind - source: ./strimzi-listener-certs - target: /opt/kafka/certificates/custom-oauth-9093-certs - read_only: true - - type: bind - source: ./strimzi-listener-certs - target: /opt/kafka/certificates/oauth-oauth-9093-certs - read_only: true - - type: bind - source: ./strimzi-kafka2-config - target: /opt/kafka/custom-config - read_only: true - - type: volume - source: kafka2-data - target: /var/lib/kafka/data - environment: - KAFKA_METRICS_ENABLED: "false" - STRIMZI_KAFKA_GC_LOG_ENABLED: "false" - KAFKA_HEAP_OPTS: "-Xms128M" - - kafka3: - image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0 - hostname: kafka-3 - container_name: kafka-3 - read_only: false - entrypoint: - /bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'" - user: root - depends_on: - - zookeeper - ports: - - "39094:39094" - - "39093:39093" - - "39092:39092" - volumes: - - type: bind - source: ./strimzi-ca-certs - target: /opt/kafka/cluster-ca-certs - read_only: true - - type: bind - source: ./strimzi-broker-certs - target: /opt/kafka/broker-certs - read_only: true - - type: bind - source: ./strimzi-client-ca - target: /opt/kafka/client-ca-certs - read_only: true - - type: bind - source: ./strimzi-listener-certs - target: /opt/kafka/certificates/custom-mtls-9094-certs - read_only: true - - type: bind - source: ./strimzi-listener-certs - target: /opt/kafka/certificates/custom-oauth-9093-certs - read_only: true - - type: bind - source: ./strimzi-listener-certs - target: /opt/kafka/certificates/oauth-oauth-9093-certs - read_only: true - - type: bind - source: ./strimzi-kafka3-config - target: /opt/kafka/custom-config - read_only: true - - type: volume - source: 
kafka3-data - target: /var/lib/kafka/data - environment: - KAFKA_METRICS_ENABLED: "false" - STRIMZI_KAFKA_GC_LOG_ENABLED: "false" - KAFKA_HEAP_OPTS: "-Xms128M" - hydra: - image: oryd/hydra:v1.10.6-sqlite - hostname: hydra - container_name: hydra - ports: - - "4443:4443" - - "4444:4444" - read_only: false - entrypoint: hydra serve all -c /config/config.yaml --sqa-opt-out - volumes: - - type: bind - source: ./oauth-config - target: /config - read_only: true - hydra-config: - image: oryd/hydra:v1.10.6-sqlite - hostname: hydra-config - container_name: hydra-config - depends_on: - - hydra - entrypoint: | - /bin/sh -c "sleep 20;hydra clients create --skip-tls-verify -g client_credentials --id dapr -n dapr -r token -a openid,kafka --secret dapr-test; hydra clients create --skip-tls-verify -g client_credentials --id kafka -n kafka -r token -a openid --secret dapr-test" - environment: - HYDRA_ADMIN_URL: https://hydra:4444 -volumes: - kafka1-data: {} - kafka2-data: {} - kafka3-data: {} +version: "3.7" +services: + zookeeper: + image: confluentinc/cp-zookeeper:7.3.0 + hostname: zookeeper + container_name: zookeeper + ports: + - "2181:2181" + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 + + kafka1: + image: confluentinc/cp-server:7.3.0 + hostname: kafka1 + container_name: kafka1 + depends_on: + - zookeeper + ports: + - "19092:19092" + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092,PLAINTEXT_HOST://localhost:19092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_NUM_PARTITIONS: 10 + + kafka2: + image: confluentinc/cp-server:7.3.0 + hostname: kafka2 + container_name: kafka2 + depends_on: + - zookeeper + ports: + - "29092:29092" + environment: + KAFKA_BROKER_ID: 2 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092,PLAINTEXT_HOST://localhost:29092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_NUM_PARTITIONS: 10 + + kafka3: + image: confluentinc/cp-server:7.3.0 + hostname: kafka3 + container_name: kafka3 + depends_on: + - zookeeper + ports: + - "39092:39092" + environment: + KAFKA_BROKER_ID: 3 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092,PLAINTEXT_HOST://localhost:39092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_NUM_PARTITIONS: 10 \ No newline at end of file diff --git a/tests/certification/bindings/kafka/kafka_retry_test.go b/tests/certification/bindings/kafka/kafka_retry_test.go index 5447ece7a..8082be809 100644 --- a/tests/certification/bindings/kafka/kafka_retry_test.go +++ b/tests/certification/bindings/kafka/kafka_retry_test.go @@ -1,339 +1,318 @@ -/* -Copyright 2021 The Dapr Authors -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kafka_test - -import ( - "context" - "crypto/tls" - "fmt" - "net/http" - "testing" - "time" - - "github.com/Shopify/sarama" - "github.com/cenkalti/backoff/v4" - "github.com/google/uuid" - "github.com/stretchr/testify/require" - "go.uber.org/multierr" - - // Pub/Sub. - - "github.com/dapr/components-contrib/bindings" - - // Dapr runtime and Go-SDK - "github.com/dapr/dapr/pkg/runtime" - dapr "github.com/dapr/go-sdk/client" - "github.com/dapr/go-sdk/service/common" - kit_retry "github.com/dapr/kit/retry" - - // Certification testing runnables - "github.com/dapr/components-contrib/tests/certification/embedded" - "github.com/dapr/components-contrib/tests/certification/flow" - "github.com/dapr/components-contrib/tests/certification/flow/app" - "github.com/dapr/components-contrib/tests/certification/flow/dockercompose" - "github.com/dapr/components-contrib/tests/certification/flow/network" - "github.com/dapr/components-contrib/tests/certification/flow/retry" - "github.com/dapr/components-contrib/tests/certification/flow/sidecar" - "github.com/dapr/components-contrib/tests/certification/flow/simulate" - "github.com/dapr/components-contrib/tests/certification/flow/watcher" -) - -func TestKafka_with_retry(t *testing.T) { - // For Kafka, we should ensure messages are received in order. - consumerGroup1 := watcher.NewOrdered() - // This watcher is across multiple consumers in the same group - // so exact ordering is not expected. - consumerGroup2 := watcher.NewUnordered() - - // Application logic that tracks messages from a topic. - application := func(appName string, watcher *watcher.Watcher) app.SetupFn { - return func(ctx flow.Context, s common.Service) error { - // Simulate periodic errors. - sim := simulate.PeriodicError(ctx, 100) - - // Setup the /orders event handler. - return multierr.Combine( - s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) { - if err := sim(); err != nil { - return nil, err - } - // Track/Observe the data of the event. - watcher.Observe(string(in.Data)) - ctx.Logf("======== %s received event: %s\n", appName, string(in.Data)) - return in.Data, nil - }), - ) - } - } - - // Set the partition key on all messages so they - // are written to the same partition. - // This allows for checking of ordered messages. - metadata := map[string]string{ - messageKey: "test", - } - - // Test logic that sends messages to a topic and - // verifies the application has received them. - sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable { - _, hasKey := metadata[messageKey] - return func(ctx flow.Context) error { - client := sidecar.GetClient(ctx, sidecarName1) - - // Declare what is expected BEFORE performing any steps - // that will satisfy the test. - msgs := make([]string, numMessages) - for i := range msgs { - msgs[i] = fmt.Sprintf("Hello, Messages %03d", i) - } - for _, m := range watchers { - m.ExpectStrings(msgs...) - } - // If no key it provided, create a random one. - // For Kafka, this will spread messages across - // the topic's partitions. 
- if !hasKey { - metadata[messageKey] = uuid.NewString() - } - - // Send events that the application above will observe. - ctx.Log("Sending messages!") - for _, msg := range msgs { - ctx.Logf("Sending: %q", msg) - err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{ - Name: bindingName, - Operation: string(bindings.CreateOperation), - Data: []byte(msg), - Metadata: metadata, - }) - require.NoError(ctx, err, "error output binding message") - } - - // Do the messages we observed match what we expect? - for _, m := range watchers { - m.Assert(ctx, time.Minute) - } - - return nil - } - } - - // sendMessagesInBackground and assertMessages are - // Runnables for testing publishing and consuming - // messages reliably when infrastructure and network - // interruptions occur. - var task flow.AsyncTask - sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable { - return func(ctx flow.Context) error { - client := sidecar.GetClient(ctx, sidecarName1) - for _, m := range watchers { - m.Reset() - } - - t := time.NewTicker(100 * time.Millisecond) - defer t.Stop() - - counter := 1 - for { - select { - case <-task.Done(): - return nil - case <-t.C: - msg := fmt.Sprintf("Background message - %03d", counter) - for _, m := range watchers { - m.Prepare(msg) // Track for observation - } - - // Publish with retries. - bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task) - if err := kit_retry.NotifyRecover(func() error { - return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{ - Name: bindingName, - Operation: string(bindings.CreateOperation), - Data: []byte(msg), - Metadata: metadata, - }) - }, bo, func(err error, t time.Duration) { - ctx.Logf("Error outpub binding message, retrying in %s", t) - }, func() {}); err == nil { - for _, m := range watchers { - m.Add(msg) // Success - } - counter++ - } else { - for _, m := range watchers { - m.Remove(msg) // Remove from Tracking - } - } - } - } - } - } - assertMessages := func(messages ...*watcher.Watcher) flow.Runnable { - return func(ctx flow.Context) error { - // Signal sendMessagesInBackground to stop and wait for it to complete. - task.CancelAndWait() - for _, m := range messages { - m.Assert(ctx, 5*time.Minute) - } - - return nil - } - } - - flow.New(t, "kafka certification with retry"). - // Run Kafka using Docker Compose. - Step(dockercompose.Run(clusterName, dockerComposeYAML)). - Step("wait for broker sockets", - network.WaitForAddresses(5*time.Minute, brokers...)). - Step("wait", flow.Sleep(5*time.Second)). - Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error { - config := sarama.NewConfig() - config.ClientID = "test-consumer" - config.Consumer.Return.Errors = true - - // Create new consumer - client, err := sarama.NewConsumer(brokers, config) - if err != nil { - return err - } - defer client.Close() - - // Ensure the brokers are ready by attempting to consume - // a topic partition. - _, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest) - - return err - })). - Step("wait for Dapr OAuth client", retry.Do(20*time.Second, 6, func(ctx flow.Context) error { - httpClient := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, // test server certificate is not trusted. - }, - }, - } - - resp, err := httpClient.Get(oauthClientQuery) - if err != nil { - return err - } - if resp.StatusCode != 200 { - return fmt.Errorf("oauth client query for 'dapr' not successful") - } - return nil - })). 
- - // Run the application logic above. - Step(app.Run(appID1, fmt.Sprintf(":%d", appPort), - application(appID1, consumerGroup1))). - // - // Run the Dapr sidecar with the Kafka component. - Step(sidecar.Run(sidecarName1, - embedded.WithComponentsPath("./components-retry/consumer1"), - embedded.WithAppProtocol(runtime.HTTPProtocol, appPort), - embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort), - embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort), - componentRuntimeOptions(), - )). - // - // Run the second application. - Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset), - application(appID2, consumerGroup2))). - // - // Run the Dapr sidecar with the Kafka component. - Step(sidecar.Run(sidecarName2, - embedded.WithComponentsPath("./components-retry/mtls-consumer"), - embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset), - embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset), - embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset), - embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset), - componentRuntimeOptions(), - )). - // - // Send messages using the same metadata/message key so we can expect - // in-order processing. - Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup2)). - - // Run the third application. - Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2), - application(appID3, consumerGroup2))). - // - // Run the Dapr sidecar with the Kafka component. - Step(sidecar.Run(sidecarName3, - embedded.WithComponentsPath("./components-retry/oauth-consumer"), - embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2), - embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2), - embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2), - embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2), - componentRuntimeOptions(), - )). - Step("reset", flow.Reset(consumerGroup2)). - // - // Send messages with random keys to test message consumption - // across more than one consumer group and consumers per group. - Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)). - - // Gradually stop each broker. - // This tests the components ability to handle reconnections - // when brokers are shutdown cleanly. - StepAsync("steady flow of messages to publish", &task, - sendMessagesInBackground(consumerGroup1, consumerGroup2)). - Step("wait", flow.Sleep(5*time.Second)). - Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")). - Step("wait", flow.Sleep(5*time.Second)). - // - // Errors will likely start occurring here since quorum is lost. - Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")). - Step("wait", flow.Sleep(10*time.Second)). - // - // Errors will definitely occur here. - Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")). - Step("wait", flow.Sleep(30*time.Second)). - Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")). - Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")). - Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")). - // - // Component should recover at this point. - Step("wait", flow.Sleep(30*time.Second)). - Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)). - // - // Simulate a network interruption. 
- // This tests the components ability to handle reconnections - // when Dapr is disconnected abnormally. - StepAsync("steady flow of messages to publish", &task, - sendMessagesInBackground(consumerGroup1, consumerGroup2)). - Step("wait", flow.Sleep(5*time.Second)). - // - // Errors will occurring here. - Step("interrupt network", - network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")). - // - // Component should recover at this point. - Step("wait", flow.Sleep(30*time.Second)). - Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)). - - // Reset and test that all messages are received during a - // consumer rebalance. - Step("reset", flow.Reset(consumerGroup2)). - StepAsync("steady flow of messages to publish", &task, - sendMessagesInBackground(consumerGroup2)). - Step("wait", flow.Sleep(15*time.Second)). - Step("stop sidecar 2", sidecar.Stop(sidecarName2)). - Step("wait", flow.Sleep(3*time.Second)). - Step("stop app 2", app.Stop(appID2)). - Step("wait", flow.Sleep(30*time.Second)). - Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)). - Run() -} +/* +Copyright 2021 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kafka_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/Shopify/sarama" + "github.com/cenkalti/backoff/v4" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + "go.uber.org/multierr" + + // Pub/Sub. + + "github.com/dapr/components-contrib/bindings" + + // Dapr runtime and Go-SDK + "github.com/dapr/dapr/pkg/runtime" + dapr "github.com/dapr/go-sdk/client" + "github.com/dapr/go-sdk/service/common" + kit_retry "github.com/dapr/kit/retry" + + // Certification testing runnables + "github.com/dapr/components-contrib/tests/certification/embedded" + "github.com/dapr/components-contrib/tests/certification/flow" + "github.com/dapr/components-contrib/tests/certification/flow/app" + "github.com/dapr/components-contrib/tests/certification/flow/dockercompose" + "github.com/dapr/components-contrib/tests/certification/flow/network" + "github.com/dapr/components-contrib/tests/certification/flow/retry" + "github.com/dapr/components-contrib/tests/certification/flow/sidecar" + "github.com/dapr/components-contrib/tests/certification/flow/simulate" + "github.com/dapr/components-contrib/tests/certification/flow/watcher" +) + +func TestKafka_with_retry(t *testing.T) { + // For Kafka, we should ensure messages are received in order. + consumerGroup1 := watcher.NewOrdered() + // This watcher is across multiple consumers in the same group + // so exact ordering is not expected. + consumerGroup2 := watcher.NewUnordered() + + // Application logic that tracks messages from a topic. + application := func(appName string, watcher *watcher.Watcher) app.SetupFn { + return func(ctx flow.Context, s common.Service) error { + // Simulate periodic errors. + sim := simulate.PeriodicError(ctx, 100) + + // Setup the /orders event handler. 
+			return multierr.Combine(
+				s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) {
+					if err := sim(); err != nil {
+						return nil, err
+					}
+					// Track/Observe the data of the event.
+					watcher.Observe(string(in.Data))
+					ctx.Logf("======== %s received event: %s\n", appName, string(in.Data))
+					return in.Data, nil
+				}),
+			)
+		}
+	}
+
+	// Set the partition key on all messages so they
+	// are written to the same partition.
+	// This allows for checking of ordered messages.
+	metadata := map[string]string{
+		messageKey: "test",
+	}
+
+	// Test logic that sends messages to a topic and
+	// verifies the application has received them.
+	sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable {
+		_, hasKey := metadata[messageKey]
+		return func(ctx flow.Context) error {
+			client := sidecar.GetClient(ctx, sidecarName1)
+
+			// Declare what is expected BEFORE performing any steps
+			// that will satisfy the test.
+			msgs := make([]string, numMessages)
+			for i := range msgs {
+				msgs[i] = fmt.Sprintf("Hello, Messages %03d", i)
+			}
+			for _, m := range watchers {
+				m.ExpectStrings(msgs...)
+			}
+			// If no key is provided, create a random one.
+			// For Kafka, this will spread messages across
+			// the topic's partitions.
+			if !hasKey {
+				metadata[messageKey] = uuid.NewString()
+			}
+
+			// Send events that the application above will observe.
+			ctx.Log("Sending messages!")
+			for _, msg := range msgs {
+				ctx.Logf("Sending: %q", msg)
+				err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
+					Name:      bindingName,
+					Operation: string(bindings.CreateOperation),
+					Data:      []byte(msg),
+					Metadata:  metadata,
+				})
+				require.NoError(ctx, err, "error invoking output binding")
+			}
+
+			// Do the messages we observed match what we expect?
+			for _, m := range watchers {
+				m.Assert(ctx, time.Minute)
+			}
+
+			return nil
+		}
+	}
+
+	// sendMessagesInBackground and assertMessages are
+	// Runnables for testing publishing and consuming
+	// messages reliably when infrastructure and network
+	// interruptions occur.
+	var task flow.AsyncTask
+	sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable {
+		return func(ctx flow.Context) error {
+			client := sidecar.GetClient(ctx, sidecarName1)
+			for _, m := range watchers {
+				m.Reset()
+			}
+
+			t := time.NewTicker(100 * time.Millisecond)
+			defer t.Stop()
+
+			counter := 1
+			for {
+				select {
+				case <-task.Done():
+					return nil
+				case <-t.C:
+					msg := fmt.Sprintf("Background message - %03d", counter)
+					for _, m := range watchers {
+						m.Prepare(msg) // Track for observation
+					}
+
+					// Publish with retries.
+					bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task)
+					if err := kit_retry.NotifyRecover(func() error {
+						return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
+							Name:      bindingName,
+							Operation: string(bindings.CreateOperation),
+							Data:      []byte(msg),
+							Metadata:  metadata,
+						})
+					}, bo, func(err error, t time.Duration) {
+						ctx.Logf("Error invoking output binding, retrying in %s", t)
+					}, func() {}); err == nil {
+						for _, m := range watchers {
+							m.Add(msg) // Success
+						}
+						counter++
+					} else {
+						for _, m := range watchers {
+							m.Remove(msg) // Remove from Tracking
+						}
+					}
+				}
+			}
+		}
+	}
+	assertMessages := func(messages ...*watcher.Watcher) flow.Runnable {
+		return func(ctx flow.Context) error {
+			// Signal sendMessagesInBackground to stop and wait for it to complete.
+			task.CancelAndWait()
+			for _, m := range messages {
+				m.Assert(ctx, 5*time.Minute)
+			}
+
+			return nil
+		}
+	}
+
+	flow.New(t, "kafka certification with retry").
+		// Run Kafka using Docker Compose.
+		Step(dockercompose.Run(clusterName, dockerComposeYAML)).
+		Step("wait for broker sockets",
+			network.WaitForAddresses(5*time.Minute, brokers...)).
+		Step("wait", flow.Sleep(5*time.Second)).
+		Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error {
+			config := sarama.NewConfig()
+			config.ClientID = "test-consumer"
+			config.Consumer.Return.Errors = true
+
+			// Create new consumer
+			client, err := sarama.NewConsumer(brokers, config)
+			if err != nil {
+				return err
+			}
+			defer client.Close()
+
+			// Ensure the brokers are ready by attempting to consume
+			// a topic partition.
+			_, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest)
+
+			return err
+		})).
+		// Run the application logic above.
+		Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),
+			application(appID1, consumerGroup1))).
+		//
+		// Run the Dapr sidecar with the Kafka component.
+		Step(sidecar.Run(sidecarName1,
+			embedded.WithComponentsPath("./components-retry/consumer1"),
+			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort),
+			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort),
+			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort),
+			componentRuntimeOptions(),
+		)).
+		//
+		// Run the second application.
+		Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset),
+			application(appID2, consumerGroup2))).
+		//
+		// Run the Dapr sidecar with the Kafka component.
+		Step(sidecar.Run(sidecarName2,
+			embedded.WithComponentsPath("./components-retry/consumer2"),
+			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
+			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
+			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),
+			embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset),
+			componentRuntimeOptions(),
+		)).
+		//
+		// Send messages using the same metadata/message key so we can expect
+		// in-order processing.
+		Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup2)).
+
+		// Run the third application.
+		Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2),
+			application(appID3, consumerGroup2))).
+		//
+		// Run the Dapr sidecar with the Kafka component.
+		Step(sidecar.Run(sidecarName3,
+			embedded.WithComponentsPath("./components-retry/consumer2"),
+			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
+			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
+			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),
+			embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2),
+			componentRuntimeOptions(),
+		)).
+		Step("reset", flow.Reset(consumerGroup2)).
+		//
+		// Send messages with random keys to test message consumption
+		// across more than one consumer group and consumers per group.
+		Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)).
+
+		// Gradually stop each broker.
+		// This tests the component's ability to handle reconnections
+		// when brokers are shut down cleanly.
+		StepAsync("steady flow of messages to publish", &task,
+			sendMessagesInBackground(consumerGroup1, consumerGroup2)).
+		Step("wait", flow.Sleep(5*time.Second)).
+		Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")).
+		Step("wait", flow.Sleep(5*time.Second)).
+		//
+		// Errors will likely start occurring here since quorum is lost.
+		Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")).
+		Step("wait", flow.Sleep(10*time.Second)).
+		//
+		// Errors will definitely occur here.
+		Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")).
+		Step("wait", flow.Sleep(30*time.Second)).
+		Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")).
+		Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")).
+		Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")).
+		//
+		// Component should recover at this point.
+		Step("wait", flow.Sleep(30*time.Second)).
+		Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)).
+		//
+		// Simulate a network interruption.
+		// This tests the component's ability to handle reconnections
+		// when Dapr is disconnected abnormally.
+		StepAsync("steady flow of messages to publish", &task,
+			sendMessagesInBackground(consumerGroup1, consumerGroup2)).
+		Step("wait", flow.Sleep(5*time.Second)).
+		//
+		// Errors will occur here.
+		Step("interrupt network",
+			network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")).
+		//
+		// Component should recover at this point.
+		Step("wait", flow.Sleep(30*time.Second)).
+		Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)).
+
+		// Reset and test that all messages are received during a
+		// consumer rebalance.
+		Step("reset", flow.Reset(consumerGroup2)).
+		StepAsync("steady flow of messages to publish", &task,
+			sendMessagesInBackground(consumerGroup2)).
+		Step("wait", flow.Sleep(15*time.Second)).
+		Step("stop sidecar 2", sidecar.Stop(sidecarName2)).
+		Step("wait", flow.Sleep(3*time.Second)).
+		Step("stop app 2", app.Stop(appID2)).
+		Step("wait", flow.Sleep(30*time.Second)).
+		Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)).
+		Run()
+}
diff --git a/tests/certification/bindings/kafka/kafka_test.go b/tests/certification/bindings/kafka/kafka_test.go
index 674f19648..a54388d2c 100644
--- a/tests/certification/bindings/kafka/kafka_test.go
+++ b/tests/certification/bindings/kafka/kafka_test.go
@@ -1,374 +1,352 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka_test
-
-import (
-	"context"
-	"crypto/tls"
-	"fmt"
-	"net/http"
-	"testing"
-	"time"
-
-	"github.com/Shopify/sarama"
-	"github.com/cenkalti/backoff/v4"
-	"github.com/google/uuid"
-	"github.com/stretchr/testify/require"
-	"go.uber.org/multierr"
-
-	// Pub/Sub.
- - "github.com/dapr/components-contrib/bindings" - bindings_kafka "github.com/dapr/components-contrib/bindings/kafka" - bindings_loader "github.com/dapr/dapr/pkg/components/bindings" - - // Dapr runtime and Go-SDK - "github.com/dapr/dapr/pkg/runtime" - dapr "github.com/dapr/go-sdk/client" - "github.com/dapr/go-sdk/service/common" - "github.com/dapr/kit/logger" - kit_retry "github.com/dapr/kit/retry" - - // Certification testing runnables - "github.com/dapr/components-contrib/tests/certification/embedded" - "github.com/dapr/components-contrib/tests/certification/flow" - "github.com/dapr/components-contrib/tests/certification/flow/app" - "github.com/dapr/components-contrib/tests/certification/flow/dockercompose" - "github.com/dapr/components-contrib/tests/certification/flow/network" - "github.com/dapr/components-contrib/tests/certification/flow/retry" - "github.com/dapr/components-contrib/tests/certification/flow/sidecar" - "github.com/dapr/components-contrib/tests/certification/flow/watcher" -) - -const ( - sidecarName1 = "dapr-1" - sidecarName2 = "dapr-2" - sidecarName3 = "dapr-3" - appID1 = "app-1" - appID2 = "app-2" - appID3 = "app-3" - clusterName = "kafkacertification" - dockerComposeYAML = "docker-compose.yml" - numMessages = 1000 - appPort = 8000 - portOffset = 2 - messageKey = "partitionKey" - - bindingName = "messagebus" - topicName = "neworder" -) - -var ( - brokers = []string{"localhost:19092", "localhost:29092", "localhost:39092"} - oauthClientQuery = "https://localhost:4444/clients/dapr" -) - -func TestKafka(t *testing.T) { - // For Kafka, we should ensure messages are received in order. - consumerGroup1 := watcher.NewOrdered() - // This watcher is across multiple consumers in the same group - // so exact ordering is not expected. - consumerGroup2 := watcher.NewUnordered() - - // Application logic that tracks messages from a topic. - application := func(appName string, watcher *watcher.Watcher) app.SetupFn { - return func(ctx flow.Context, s common.Service) error { - // Setup the /orders event handler. - return multierr.Combine( - s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) { - // Track/Observe the data of the event. - watcher.Observe(string(in.Data)) - ctx.Logf("======== %s received event: %s\n", appName, string(in.Data)) - return in.Data, nil - }), - ) - } - } - - // Set the partition key on all messages so they - // are written to the same partition. - // This allows for checking of ordered messages. - metadata := map[string]string{ - messageKey: "test", - } - - // Test logic that sends messages to a topic and - // verifies the application has received them. - sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable { - _, hasKey := metadata[messageKey] - return func(ctx flow.Context) error { - client := sidecar.GetClient(ctx, sidecarName1) - - // Declare what is expected BEFORE performing any steps - // that will satisfy the test. - msgs := make([]string, numMessages) - for i := range msgs { - msgs[i] = fmt.Sprintf("Hello, Messages %03d", i) - } - for _, m := range watchers { - m.ExpectStrings(msgs...) - } - // If no key it provided, create a random one. - // For Kafka, this will spread messages across - // the topic's partitions. - if !hasKey { - metadata[messageKey] = uuid.NewString() - } - - // Send events that the application above will observe. 
- ctx.Log("Sending messages!") - for _, msg := range msgs { - err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{ - Name: bindingName, - Operation: string(bindings.CreateOperation), - Data: []byte(msg), - Metadata: metadata, - }) - require.NoError(ctx, err, "error publishing message") - } - - // Do the messages we observed match what we expect? - for _, m := range watchers { - m.Assert(ctx, time.Minute) - } - - return nil - } - } - - // sendMessagesInBackground and assertMessages are - // Runnables for testing publishing and consuming - // messages reliably when infrastructure and network - // interruptions occur. - var task flow.AsyncTask - sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable { - return func(ctx flow.Context) error { - client := sidecar.GetClient(ctx, sidecarName1) - for _, m := range watchers { - m.Reset() - } - - t := time.NewTicker(100 * time.Millisecond) - defer t.Stop() - - counter := 1 - for { - select { - case <-task.Done(): - return nil - case <-t.C: - msg := fmt.Sprintf("Background message - %03d", counter) - for _, m := range watchers { - m.Prepare(msg) // Track for observation - } - - // Publish with retries. - bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task) - if err := kit_retry.NotifyRecover(func() error { - return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{ - Name: bindingName, - Operation: string(bindings.CreateOperation), - Data: []byte(msg), - Metadata: metadata, - }) - }, bo, func(err error, t time.Duration) { - ctx.Logf("Error outpub binding message, retrying in %s", t) - }, func() {}); err == nil { - for _, m := range watchers { - m.Add(msg) // Success - } - counter++ - } else { - for _, m := range watchers { - m.Remove(msg) // Remove from Tracking - } - } - } - } - } - } - assertMessages := func(watchers ...*watcher.Watcher) flow.Runnable { - return func(ctx flow.Context) error { - // Signal sendMessagesInBackground to stop and wait for it to complete. - task.CancelAndWait() - for _, m := range watchers { - m.Assert(ctx, 5*time.Minute) - } - - return nil - } - } - - flow.New(t, "kafka certification"). - // Run Kafka using Docker Compose. - Step(dockercompose.Run(clusterName, dockerComposeYAML)). - Step("wait for broker sockets", - network.WaitForAddresses(5*time.Minute, brokers...)). - Step("wait", flow.Sleep(5*time.Second)). - Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error { - config := sarama.NewConfig() - config.ClientID = "test-consumer" - config.Consumer.Return.Errors = true - - // Create new consumer - client, err := sarama.NewConsumer(brokers, config) - if err != nil { - return err - } - defer client.Close() - - // Ensure the brokers are ready by attempting to consume - // a topic partition. - _, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest) - - return err - })). - Step("wait for Dapr OAuth client", retry.Do(20*time.Second, 6, func(ctx flow.Context) error { - httpClient := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, // test server certificate is not trusted. - }, - }, - } - - resp, err := httpClient.Get(oauthClientQuery) - if err != nil { - return err - } - if resp.StatusCode != 200 { - return fmt.Errorf("oauth client query for 'dapr' not successful") - } - return nil - })). - // - // Run the application logic above. - Step(app.Run(appID1, fmt.Sprintf(":%d", appPort), - application(appID1, consumerGroup1))). 
- // - // Run the Dapr sidecar with the Kafka component. - Step(sidecar.Run(sidecarName1, - embedded.WithComponentsPath("./components/consumer1"), - embedded.WithAppProtocol(runtime.HTTPProtocol, appPort), - embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort), - embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort), - componentRuntimeOptions(), - )). - // - // Run the second application. - Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset), - application(appID2, consumerGroup2))). - // - // Run the Dapr sidecar with the Kafka component. - Step(sidecar.Run(sidecarName2, - embedded.WithComponentsPath("./components/mtls-consumer"), - embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset), - embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset), - embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset), - embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset), - componentRuntimeOptions(), - )). - // - // Send messages using the same metadata/message key so we can expect - // in-order processing. - Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup1, consumerGroup2)). - // - // Run the third application. - Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2), - application(appID3, consumerGroup2))). - // - // Run the Dapr sidecar with the Kafka component. - Step(sidecar.Run(sidecarName3, - embedded.WithComponentsPath("./components/oauth-consumer"), - embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2), - embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2), - embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2), - embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2), - componentRuntimeOptions(), - )). - Step("reset", flow.Reset(consumerGroup2)). - // - // Send messages with random keys to test message consumption - // across more than one consumer group and consumers per group. - Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)). - // - // Gradually stop each broker. - // This tests the components ability to handle reconnections - // when brokers are shutdown cleanly. - StepAsync("steady flow of messages to publish", &task, - sendMessagesInBackground(consumerGroup1, consumerGroup2)). - Step("wait", flow.Sleep(5*time.Second)). - Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")). - Step("wait", flow.Sleep(5*time.Second)). - // - // Errors will likely start occurring here since quorum is lost. - Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")). - Step("wait", flow.Sleep(10*time.Second)). - // - // Errors will definitely occur here. - Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")). - Step("wait", flow.Sleep(30*time.Second)). - Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")). - Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")). - Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")). - // - // Component should recover at this point. - Step("wait", flow.Sleep(30*time.Second)). - Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)). - // - // Simulate a network interruption. - // This tests the components ability to handle reconnections - // when Dapr is disconnected abnormally. 
- StepAsync("steady flow of messages to publish", &task, - sendMessagesInBackground(consumerGroup1, consumerGroup2)). - Step("wait", flow.Sleep(5*time.Second)). - // - // Errors will occurring here. - Step("interrupt network", - network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")). - // - // Component should recover at this point. - Step("wait", flow.Sleep(30*time.Second)). - Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)). - // - // Reset and test that all messages are received during a - // consumer rebalance. - Step("reset", flow.Reset(consumerGroup2)). - StepAsync("steady flow of messages to publish", &task, - sendMessagesInBackground(consumerGroup2)). - Step("wait", flow.Sleep(15*time.Second)). - Step("stop sidecar 2", sidecar.Stop(sidecarName2)). - Step("wait", flow.Sleep(3*time.Second)). - Step("stop app 2", app.Stop(appID2)). - Step("wait", flow.Sleep(30*time.Second)). - Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)). - Run() -} - -func componentRuntimeOptions() []runtime.Option { - log := logger.NewLogger("dapr.components") - - bindingsRegistry := bindings_loader.NewRegistry() - bindingsRegistry.Logger = log - bindingsRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding { - return bindings_kafka.NewKafka(l) - }, "kafka") - bindingsRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding { - return bindings_kafka.NewKafka(l) - }, "kafka") - - return []runtime.Option{ - runtime.WithBindings(bindingsRegistry), - } -} +/* +Copyright 2021 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kafka_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/Shopify/sarama" + "github.com/cenkalti/backoff/v4" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + "go.uber.org/multierr" + + // Pub/Sub. 
+
+	"github.com/dapr/components-contrib/bindings"
+	bindings_kafka "github.com/dapr/components-contrib/bindings/kafka"
+	bindings_loader "github.com/dapr/dapr/pkg/components/bindings"
+
+	// Dapr runtime and Go-SDK
+	"github.com/dapr/dapr/pkg/runtime"
+	dapr "github.com/dapr/go-sdk/client"
+	"github.com/dapr/go-sdk/service/common"
+	"github.com/dapr/kit/logger"
+	kit_retry "github.com/dapr/kit/retry"
+
+	// Certification testing runnables
+	"github.com/dapr/components-contrib/tests/certification/embedded"
+	"github.com/dapr/components-contrib/tests/certification/flow"
+	"github.com/dapr/components-contrib/tests/certification/flow/app"
+	"github.com/dapr/components-contrib/tests/certification/flow/dockercompose"
+	"github.com/dapr/components-contrib/tests/certification/flow/network"
+	"github.com/dapr/components-contrib/tests/certification/flow/retry"
+	"github.com/dapr/components-contrib/tests/certification/flow/sidecar"
+	"github.com/dapr/components-contrib/tests/certification/flow/watcher"
+)
+
+const (
+	sidecarName1      = "dapr-1"
+	sidecarName2      = "dapr-2"
+	sidecarName3      = "dapr-3"
+	appID1            = "app-1"
+	appID2            = "app-2"
+	appID3            = "app-3"
+	clusterName       = "kafkacertification"
+	dockerComposeYAML = "docker-compose.yml"
+	numMessages       = 1000
+	appPort           = 8000
+	portOffset        = 2
+	messageKey        = "partitionKey"
+
+	bindingName = "messagebus"
+	topicName   = "neworder"
+)
+
+var (
+	brokers = []string{"localhost:19092", "localhost:29092", "localhost:39092"}
+)
+
+func TestKafka(t *testing.T) {
+	// For Kafka, we should ensure messages are received in order.
+	consumerGroup1 := watcher.NewOrdered()
+	// This watcher is across multiple consumers in the same group,
+	// so exact ordering is not expected.
+	consumerGroup2 := watcher.NewUnordered()
+
+	// Application logic that tracks messages from a topic.
+	application := func(appName string, watcher *watcher.Watcher) app.SetupFn {
+		return func(ctx flow.Context, s common.Service) error {
+			// Set up the /orders event handler.
+			return multierr.Combine(
+				s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) {
+					// Track/Observe the data of the event.
+					watcher.Observe(string(in.Data))
+					ctx.Logf("======== %s received event: %s\n", appName, string(in.Data))
+					return in.Data, nil
+				}),
+			)
+		}
+	}
+
+	// Set the partition key on all messages so they
+	// are written to the same partition.
+	// This allows checking for ordered messages.
+	metadata := map[string]string{
+		messageKey: "test",
+	}
+
+	// Test logic that sends messages to a topic and
+	// verifies the application has received them.
+	sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable {
+		_, hasKey := metadata[messageKey]
+		return func(ctx flow.Context) error {
+			client := sidecar.GetClient(ctx, sidecarName1)
+
+			// Declare what is expected BEFORE performing any steps
+			// that will satisfy the test.
+			msgs := make([]string, numMessages)
+			for i := range msgs {
+				msgs[i] = fmt.Sprintf("Hello, Messages %03d", i)
+			}
+			for _, m := range watchers {
+				m.ExpectStrings(msgs...)
+			}
+			// If no key is provided, create a random one.
+			// For Kafka, this will spread messages across
+			// the topic's partitions.
+			if !hasKey {
+				metadata[messageKey] = uuid.NewString()
+			}
+
+			// Send events that the application above will observe.
+			ctx.Log("Sending messages!")
+			for _, msg := range msgs {
+				err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
+					Name:      bindingName,
+					Operation: string(bindings.CreateOperation),
+					Data:      []byte(msg),
+					Metadata:  metadata,
+				})
+				require.NoError(ctx, err, "error publishing message")
+			}
+
+			// Do the messages we observed match what we expect?
+			for _, m := range watchers {
+				m.Assert(ctx, time.Minute)
+			}
+
+			return nil
+		}
+	}
+
+	// sendMessagesInBackground and assertMessages are
+	// Runnables for testing publishing and consuming
+	// messages reliably when infrastructure and network
+	// interruptions occur.
+	var task flow.AsyncTask
+	sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable {
+		return func(ctx flow.Context) error {
+			client := sidecar.GetClient(ctx, sidecarName1)
+			for _, m := range watchers {
+				m.Reset()
+			}
+
+			t := time.NewTicker(100 * time.Millisecond)
+			defer t.Stop()
+
+			counter := 1
+			for {
+				select {
+				case <-task.Done():
+					return nil
+				case <-t.C:
+					msg := fmt.Sprintf("Background message - %03d", counter)
+					for _, m := range watchers {
+						m.Prepare(msg) // Track for observation
+					}
+
+					// Publish with retries.
+					bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task)
+					if err := kit_retry.NotifyRecover(func() error {
+						return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
+							Name:      bindingName,
+							Operation: string(bindings.CreateOperation),
+							Data:      []byte(msg),
+							Metadata:  metadata,
+						})
+					}, bo, func(err error, t time.Duration) {
+						ctx.Logf("Error invoking output binding, retrying in %s", t)
+					}, func() {}); err == nil {
+						for _, m := range watchers {
+							m.Add(msg) // Success
+						}
+						counter++
+					} else {
+						for _, m := range watchers {
+							m.Remove(msg) // Remove from tracking
+						}
+					}
+				}
+			}
+		}
+	}
+	assertMessages := func(watchers ...*watcher.Watcher) flow.Runnable {
+		return func(ctx flow.Context) error {
+			// Signal sendMessagesInBackground to stop and wait for it to complete.
+			task.CancelAndWait()
+			for _, m := range watchers {
+				m.Assert(ctx, 5*time.Minute)
+			}
+
+			return nil
+		}
+	}
+
+	flow.New(t, "kafka certification").
+		// Run Kafka using Docker Compose.
+		Step(dockercompose.Run(clusterName, dockerComposeYAML)).
+		Step("wait for broker sockets",
+			network.WaitForAddresses(5*time.Minute, brokers...)).
+		Step("wait", flow.Sleep(5*time.Second)).
+		Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error {
+			config := sarama.NewConfig()
+			config.ClientID = "test-consumer"
+			config.Consumer.Return.Errors = true
+
+			// Create a new consumer
+			client, err := sarama.NewConsumer(brokers, config)
+			if err != nil {
+				return err
+			}
+			defer client.Close()
+
+			// Ensure the brokers are ready by attempting to consume
+			// a topic partition.
+			_, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest)
+
+			return err
+		})).
+		// Run the application logic above.
+		Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),
+			application(appID1, consumerGroup1))).
+		//
+		// Run the Dapr sidecar with the Kafka component.
+		Step(sidecar.Run(sidecarName1,
+			embedded.WithComponentsPath("./components/consumer1"),
+			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort),
+			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort),
+			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort),
+			componentRuntimeOptions(),
+		)).
+		//
+		// Run the second application.
+		Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset),
+			application(appID2, consumerGroup2))).
+		//
+		// Run the Dapr sidecar with the Kafka component.
+		Step(sidecar.Run(sidecarName2,
+			embedded.WithComponentsPath("./components/consumer2"),
+			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
+			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
+			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),
+			embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset),
+			componentRuntimeOptions(),
+		)).
+		//
+		// Send messages using the same metadata/message key so we can expect
+		// in-order processing.
+		Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup1, consumerGroup2)).
+		//
+		// Run the third application.
+		Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2),
+			application(appID3, consumerGroup2))).
+		//
+		// Run the Dapr sidecar with the Kafka component.
+		Step(sidecar.Run(sidecarName3,
+			embedded.WithComponentsPath("./components/consumer2"),
+			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
+			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
+			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),
+			embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2),
+			componentRuntimeOptions(),
+		)).
+		Step("reset", flow.Reset(consumerGroup2)).
+		//
+		// Send messages with random keys to test message consumption
+		// across more than one consumer group and consumers per group.
+		Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)).
+		//
+		// Gradually stop each broker.
+		// This tests the component's ability to handle reconnections
+		// when brokers are shut down cleanly.
+		StepAsync("steady flow of messages to publish", &task,
+			sendMessagesInBackground(consumerGroup1, consumerGroup2)).
+		Step("wait", flow.Sleep(5*time.Second)).
+		Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")).
+		Step("wait", flow.Sleep(5*time.Second)).
+		//
+		// Errors will likely start occurring here since quorum is lost.
+		Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")).
+		Step("wait", flow.Sleep(10*time.Second)).
+		//
+		// Errors will definitely occur here.
+		Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")).
+		Step("wait", flow.Sleep(30*time.Second)).
+		Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")).
+		Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")).
+		Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")).
+		//
+		// Component should recover at this point.
+		Step("wait", flow.Sleep(30*time.Second)).
+		Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)).
+		//
+		// Simulate a network interruption.
+		// This tests the component's ability to handle reconnections
+		// when Dapr is disconnected abnormally.
+		StepAsync("steady flow of messages to publish", &task,
+			sendMessagesInBackground(consumerGroup1, consumerGroup2)).
+		Step("wait", flow.Sleep(5*time.Second)).
+		//
+		// Errors will occur here.
+		Step("interrupt network",
+			network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")).
+		//
+		// Component should recover at this point.
+		Step("wait", flow.Sleep(30*time.Second)).
+		Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)).
+		//
+		// Reset and test that all messages are received during a
+		// consumer rebalance.
+ Step("reset", flow.Reset(consumerGroup2)). + StepAsync("steady flow of messages to publish", &task, + sendMessagesInBackground(consumerGroup2)). + Step("wait", flow.Sleep(15*time.Second)). + Step("stop sidecar 2", sidecar.Stop(sidecarName2)). + Step("wait", flow.Sleep(3*time.Second)). + Step("stop app 2", app.Stop(appID2)). + Step("wait", flow.Sleep(30*time.Second)). + Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)). + Run() +} + +func componentRuntimeOptions() []runtime.Option { + log := logger.NewLogger("dapr.components") + + bindingsRegistry := bindings_loader.NewRegistry() + bindingsRegistry.Logger = log + bindingsRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding { + return bindings_kafka.NewKafka(l) + }, "kafka") + bindingsRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding { + return bindings_kafka.NewKafka(l) + }, "kafka") + + return []runtime.Option{ + runtime.WithBindings(bindingsRegistry), + } +} diff --git a/tests/certification/bindings/kafka/oauth-config/config.yaml b/tests/certification/bindings/kafka/oauth-config/config.yaml deleted file mode 100644 index 520d17eb1..000000000 --- a/tests/certification/bindings/kafka/oauth-config/config.yaml +++ /dev/null @@ -1,22 +0,0 @@ -serve: - admin: - host: 0.0.0.0 - port: 4444 - public: - host: 0.0.0.0 - port: 4443 - tls: - cert: - path: /config/tls/hydra.crt - key: - path: /config/tls/hydra.key -dsn: memory -log: - leak_sensitive_values: true - level: debug -urls: - self: - issuer: https://hydra:4443 -strategies: - access_token: opaque - diff --git a/tests/certification/bindings/kafka/oauth-config/tls/hydra.crt b/tests/certification/bindings/kafka/oauth-config/tls/hydra.crt deleted file mode 100644 index ad12c1cbd..000000000 --- a/tests/certification/bindings/kafka/oauth-config/tls/hydra.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDejCCAmKgAwIBAgIUIgMF15XiDisW+e4I+clKWYvxcfMwDQYJKoZIhvcNAQEL -BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew -HhcNMjExMjEwMDA1ODAwWhcNMjIxMjEwMDA1ODAwWjAjMRIwEAYDVQQKEwlEYXBy -IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDICbhBmpxFPFtoRTjdiki2ouZQbUoHE4llIQnJz3ta/+gWi/czrOmC3aHz -x9pJ1kifBG5MlbdnH8WCQXx/vPXP5hpTmTDjAp87Fygk2KWdb/bQBrpRTIEgAuK3 -IWJ9tYhcDDxSwEF52xNnRkklxZpVRZX1SmcdndEqioaAnxWEM1x+JJcjrk6Ud4dv -aX0G1xw8g6u0KT1I61Aja2OAAj+iPih6RK6xSRdxvELXbehClBHOpJP6sRw03Xw4 -HRJEesWqrGAFEp0qSZulKwn2MHAW80VVF/U9hogUQrBVFTKw/5oS9eu+BV2AY3Rh -8DACB0blpEkjIachjjo2A8wuhBeNAgMBAAGjgZ0wgZowDgYDVR0PAQH/BAQDAgWg -MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G -A1UdDgQWBBRVxfGJ7a+7DBz2PM2w/U5aeJFOfjAfBgNVHSMEGDAWgBR+l/nJVNA+ -PUmfXs1kYJbBfN4JbzAbBgNVHREEFDASggVoeWRyYYIJbG9jYWxob3N0MA0GCSqG -SIb3DQEBCwUAA4IBAQA+0zkBNBZ8okLiEl9B4nbfBvQXdkYOl9H9TdDYlWLNKb1S -8Y4SNQ4hrfKspYVIBVvWfuwnphdLeexs4ovU6OkXeVPFPSsjihX9I+sJ3bFCLvkj -lVXY/pJy/Z6QQlPg71LkCiH0Hv2RIvGZ1UtTu12d8BiF3oO8Nnzq4kiyfpPJ5QDR -GsTKmXxEzgCcR+DI4g05hI2BQuq8Xjw4jZzt0IOcWhR2ZxBwfzLQp/hAQK69iPCN -3DfD/eMr1EF8kAWec4eo3CFwHvrPpEdIMeNE7q9fuyfVPJGQZFKNHl7rF4YqYn/F -4XGJxRCjd860JkJDLrmXazED6cLE1IvYPCLUsfK8 ------END CERTIFICATE----- diff --git a/tests/certification/bindings/kafka/oauth-config/tls/hydra.key b/tests/certification/bindings/kafka/oauth-config/tls/hydra.key deleted file mode 100644 index bb1acf1b4..000000000 --- a/tests/certification/bindings/kafka/oauth-config/tls/hydra.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- 
-MIIEowIBAAKCAQEAyAm4QZqcRTxbaEU43YpItqLmUG1KBxOJZSEJyc97Wv/oFov3 -M6zpgt2h88faSdZInwRuTJW3Zx/FgkF8f7z1z+YaU5kw4wKfOxcoJNilnW/20Aa6 -UUyBIALityFifbWIXAw8UsBBedsTZ0ZJJcWaVUWV9UpnHZ3RKoqGgJ8VhDNcfiSX -I65OlHeHb2l9BtccPIOrtCk9SOtQI2tjgAI/oj4oekSusUkXcbxC123oQpQRzqST -+rEcNN18OB0SRHrFqqxgBRKdKkmbpSsJ9jBwFvNFVRf1PYaIFEKwVRUysP+aEvXr -vgVdgGN0YfAwAgdG5aRJIyGnIY46NgPMLoQXjQIDAQABAoIBAQDEErLmqxOt0aGP -LPq2PEtVqYqzHszG7uFnnOCpTZQN+HSXVQ4zOrOQMIoEF8rhQQbhx0gODVo93KiO -Kn5L/v26kEMR2kBO400McIBKzYhYL1zvPwj1k1Wl+O4crr6JlZxZDS07t3L2bEQy -oHQmb+/80T5RtmIoZ36Ugj+gZ06BytKPY2yZRpLnF/p9V77JK2BT2pg1EXahU5LL -wGhodg+MqFrKPk0TpdQ7edipHEiqprk/sEH9KA4cPfa83LBv6xRcHYBzlA0mHnZo -jgGdptDAFJeJcMLwywF1CvI/x5Y0mAkDN95uFcw8/ozX2pKGuIZYY9BjR444zKm2 -8V7Br2gBAoGBAN2n2BlBXTjOgZ7c50fGFA+oR23C90r3AHwnh1FOnCzKOUNbW48F -tsKvmI0DUK+sg+ZkGIEz1ll81FVzCAZQ8sii3LV5qnW7QVhZszHbKWtI9ulcFDqe -ZqKlOahy5GmcGfxbniufrHaBlP+Y1gwJd8NXjoFKNxLLtQ8S25e4QwKNAoGBAOcI -ZH+eaZ3653fFPzuJtsbbfqB5HW6bTLIUqnwNRGghvMP0JTLzYYVlcaLMrI2L50Qf -Z5IEl7+uVeTmRehkoe5J3r5tIifKrVGnQM7inpTfkCOlY2tsAL8/XvQ/6ikBEt2J -r166mOk3RfjuuXuBFrPwfpZ5fMggFa92e5ukWqkBAoGAQ12VsedJu9AXWP7uU8QB -qNiODO/qVKBJR3KED9QCZyJ20N/dLdSgvP69MG5HgXy/AbB+OhZVGRF1Pxsc3z6O -6yeESKtXgTyOGZn5ejePmQmt8TKI+1/U9a2dnnJ8tRQ6WZZGth9rPQEZFa2PsEzY -V0gvCWBS6KV8u74Re0UHKKkCgYB9j8Ae49d+9rgKDfd5wjTGCtDdIjXuwRSDzFuD -pCpDdeKDlRMKh9++gg2qbxZwr1J3YaIGZ9yZXoRsLQJddSPUv+0BDYr8mVhtAjtk -tSF+w6ow1VgdL8uQJT7T/FClDGJWaNgY4cztIw8yZXwFNXlDPjduTISWt2lRvVEc -m8xyAQKBgF+aAk2qJ8/MM4aXoWgjkWiDGvgfVmWsYMpalz34PDP+hzPg3LxaGKsn -jm+LQs9Z/WX26hxZK0HWQbcCsJ81mBvgeXnUrY/T50Zvd7zUFF+1WG7Is9KUlLA1 -ceQzJcixurQtuUSkwj2PfVziiufkHk43tuzDQ57carUX6kg2OwAD ------END RSA PRIVATE KEY----- diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/host.pem b/tests/certification/bindings/kafka/strimzi-broker-certs/host.pem deleted file mode 100644 index 88dd1825b..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/host.pem +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL -BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew -HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy -IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo -cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0 -E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp -MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL -27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7 -1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg -MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G -A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+ -PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth -LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B -AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y -hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x -xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2 -G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy -TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx -OrycW7rZKfrg2eZml8FnYlzO64u41oC47A== ------END CERTIFICATE----- diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.crt b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.crt deleted file mode 120000 index 76a04d5ed..000000000 --- 
a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.crt +++ /dev/null @@ -1 +0,0 @@ -kafka.crt \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.key b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.key deleted file mode 120000 index 6b5b149f4..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.key +++ /dev/null @@ -1 +0,0 @@ -kafka.key \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.p12 b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.p12 deleted file mode 120000 index b9adf7d21..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.p12 +++ /dev/null @@ -1 +0,0 @@ -kafka.p12 \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.password b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.password deleted file mode 120000 index b86aaffaa..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-1.password +++ /dev/null @@ -1 +0,0 @@ -kafka.password \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-2.crt b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-2.crt deleted file mode 120000 index 76a04d5ed..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-2.crt +++ /dev/null @@ -1 +0,0 @@ -kafka.crt \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-2.key b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-2.key deleted file mode 120000 index 6b5b149f4..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-2.key +++ /dev/null @@ -1 +0,0 @@ -kafka.key \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-2.password b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-2.password deleted file mode 120000 index b86aaffaa..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-2.password +++ /dev/null @@ -1 +0,0 @@ -kafka.password \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.crt b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.crt deleted file mode 120000 index 76a04d5ed..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.crt +++ /dev/null @@ -1 +0,0 @@ -kafka.crt \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.key b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.key deleted file mode 120000 index 6b5b149f4..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.key +++ /dev/null @@ -1 +0,0 @@ -kafka.key \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.p12 b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.p12 deleted file mode 120000 index b9adf7d21..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.p12 +++ /dev/null @@ -1 +0,0 @@ -kafka.p12 \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.password b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.password deleted file mode 120000 index b86aaffaa..000000000 --- 
a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka-3.password +++ /dev/null @@ -1 +0,0 @@ -kafka.password \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.crt b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.crt deleted file mode 100644 index 88dd1825b..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL -BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew -HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy -IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo -cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0 -E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp -MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL -27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7 -1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg -MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G -A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+ -PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth -LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B -AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y -hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x -xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2 -G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy -TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx -OrycW7rZKfrg2eZml8FnYlzO64u41oC47A== ------END CERTIFICATE----- diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.key b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.key deleted file mode 100644 index 878ed77f2..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt -3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c -eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp -GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3 -2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L -ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV -cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU -cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG -jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM -FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO -Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4 -tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ -jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3 -P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf -hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis -yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD -Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF -GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd -BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u -28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+ -qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi 
-iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq -sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv -vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR -edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk= ------END RSA PRIVATE KEY----- diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.p12 b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.p12 deleted file mode 100644 index cb4310cdc..000000000 Binary files a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.p12 and /dev/null differ diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.password b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.password deleted file mode 100644 index 95b8721cd..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka.password +++ /dev/null @@ -1 +0,0 @@ -dapr-test diff --git a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka2.p12 b/tests/certification/bindings/kafka/strimzi-broker-certs/kafka2.p12 deleted file mode 120000 index b9adf7d21..000000000 --- a/tests/certification/bindings/kafka/strimzi-broker-certs/kafka2.p12 +++ /dev/null @@ -1 +0,0 @@ -kafka.p12 \ No newline at end of file diff --git a/tests/certification/bindings/kafka/strimzi-ca-certs/ca.crt b/tests/certification/bindings/kafka/strimzi-ca-certs/ca.crt deleted file mode 100644 index 12d18e49c..000000000 --- a/tests/certification/bindings/kafka/strimzi-ca-certs/ca.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL -BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew -HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy -IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk -I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL -NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK -jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF -bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm -b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg -lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq -A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec -z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w -nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug -rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc -MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST ------END CERTIFICATE----- diff --git a/tests/certification/bindings/kafka/strimzi-ca-certs/ca.p12 b/tests/certification/bindings/kafka/strimzi-ca-certs/ca.p12 deleted file mode 100644 index ed16cf240..000000000 Binary files a/tests/certification/bindings/kafka/strimzi-ca-certs/ca.p12 and /dev/null differ diff --git a/tests/certification/bindings/kafka/strimzi-ca-certs/ca.password b/tests/certification/bindings/kafka/strimzi-ca-certs/ca.password deleted file mode 100644 index 95b8721cd..000000000 --- a/tests/certification/bindings/kafka/strimzi-ca-certs/ca.password +++ /dev/null @@ -1 +0,0 @@ -dapr-test diff --git a/tests/certification/bindings/kafka/strimzi-client-ca/ca.crt b/tests/certification/bindings/kafka/strimzi-client-ca/ca.crt deleted file mode 100644 index 12d18e49c..000000000 --- a/tests/certification/bindings/kafka/strimzi-client-ca/ca.crt +++ /dev/null @@ 
-1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL -BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew -HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy -IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk -I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL -NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK -jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF -bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm -b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg -lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq -A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec -z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w -nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug -rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc -MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST ------END CERTIFICATE----- diff --git a/tests/certification/bindings/kafka/strimzi-client-ca/ca.p12 b/tests/certification/bindings/kafka/strimzi-client-ca/ca.p12 deleted file mode 100644 index ed16cf240..000000000 Binary files a/tests/certification/bindings/kafka/strimzi-client-ca/ca.p12 and /dev/null differ diff --git a/tests/certification/bindings/kafka/strimzi-client-ca/ca.password b/tests/certification/bindings/kafka/strimzi-client-ca/ca.password deleted file mode 100644 index 95b8721cd..000000000 --- a/tests/certification/bindings/kafka/strimzi-client-ca/ca.password +++ /dev/null @@ -1 +0,0 @@ -dapr-test diff --git a/tests/certification/bindings/kafka/strimzi-kafka1-config/advertised-hostnames.config b/tests/certification/bindings/kafka/strimzi-kafka1-config/advertised-hostnames.config deleted file mode 100644 index 715c12d2f..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka1-config/advertised-hostnames.config +++ /dev/null @@ -1 +0,0 @@ -PLAIN_9092_1://localhost MTLS_9094_1://localhost OAUTH_9093_1://localhost diff --git a/tests/certification/bindings/kafka/strimzi-kafka1-config/advertised-ports.config b/tests/certification/bindings/kafka/strimzi-kafka1-config/advertised-ports.config deleted file mode 100644 index 85d6a4007..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka1-config/advertised-ports.config +++ /dev/null @@ -1 +0,0 @@ -PLAIN_9092_1://19092 MTLS_9094_1://19094 OAUTH_9093_1://19093 diff --git a/tests/certification/bindings/kafka/strimzi-kafka1-config/listeners.config b/tests/certification/bindings/kafka/strimzi-kafka1-config/listeners.config deleted file mode 100644 index 301723f8a..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka1-config/listeners.config +++ /dev/null @@ -1 +0,0 @@ -PLAIN_9092 MTLS_9094 OAUTH_9093 diff --git a/tests/certification/bindings/kafka/strimzi-kafka1-config/log4j.properties b/tests/certification/bindings/kafka/strimzi-kafka1-config/log4j.properties deleted file mode 100644 index 9cc089d92..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka1-config/log4j.properties +++ /dev/null @@ -1,18 +0,0 @@ -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n -kafka.root.logger.level=DEBUG 
-log4j.rootLogger=${kafka.root.logger.level}, CONSOLE -log4j.logger.org.I0Itec.zkclient.ZkClient=INFO -log4j.logger.org.apache.zookeeper=INFO -log4j.logger.kafka=INFO -log4j.logger.org.apache.kafka=INFO -log4j.logger.kafka.request.logger=WARN, CONSOLE -log4j.logger.kafka.network.Processor=OFF -log4j.logger.kafka.server.KafkaApis=OFF -log4j.logger.kafka.network.RequestChannel$=WARN -log4j.logger.kafka.controller=TRACE -log4j.logger.kafka.log.LogCleaner=INFO -log4j.logger.state.change.logger=TRACE -log4j.logger.kafka.authorizer.logger=INFO - diff --git a/tests/certification/bindings/kafka/strimzi-kafka1-config/server.config b/tests/certification/bindings/kafka/strimzi-kafka1-config/server.config deleted file mode 100644 index 5849c8300..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka1-config/server.config +++ /dev/null @@ -1,101 +0,0 @@ -############################## -############################## -# This file is automatically generated by the Strimzi Cluster Operator -# Any changes to this file will be ignored and overwritten! -############################## -############################## - -########## -# Broker ID -########## -broker.id=${STRIMZI_BROKER_ID} - -########## -# Zookeeper -########## -zookeeper.connect=zookeeper:2181 -zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -zookeeper.ssl.client.enable=false - -########## -# Kafka message logs configuration -########## -log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID} - -########## -# Control Plane listener -########## -listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 -listener.name.controlplane-9090.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.controlplane-9090.ssl.keystore.type=PKCS12 -listener.name.controlplane-9090.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12 -listener.name.controlplane-9090.ssl.truststore.password=${CERTS_STORE_PASSWORD} -listener.name.controlplane-9090.ssl.truststore.type=PKCS12 -listener.name.controlplane-9090.ssl.client.auth=required - -########## -# Replication listener -########## -listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 -listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.replication-9091.ssl.keystore.type=PKCS12 -listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12 -listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD} -listener.name.replication-9091.ssl.truststore.type=PKCS12 -listener.name.replication-9091.ssl.client.auth=required - -########## -# Listener configuration: MTLS-9094 -########## -listener.name.mtls-9094.ssl.client.auth=required -listener.name.mtls-9094.ssl.truststore.location=/tmp/kafka/clients.truststore.p12 -listener.name.mtls-9094.ssl.truststore.password=${CERTS_STORE_PASSWORD} -listener.name.mtls-9094.ssl.truststore.type=PKCS12 - -listener.name.mtls-9094.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 -listener.name.mtls-9094.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.mtls-9094.ssl.keystore.type=PKCS12 - - -########## -# Listener configuration: OAUTH-9093 -########## -listener.name.oauth-9093.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler -listener.name.oauth-9093.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub="admin" oauth.client.id="kafka" 
oauth.valid.issuer.uri="https://hydra:4443/" oauth.introspection.endpoint.uri="https://hydra:4444/oauth2/introspect" oauth.username.claim="sub" oauth.client.secret="dapr-test" oauth.ssl.truststore.location="/tmp/kafka/oauth-oauth-9093.truststore.p12" oauth.ssl.truststore.password="${CERTS_STORE_PASSWORD}" oauth.ssl.truststore.type="PKCS12"; -listener.name.oauth-9093.sasl.enabled.mechanisms=OAUTHBEARER -listener.name.oauth-9093.connections.max.reauth.ms=1800000 - -listener.name.oauth-9093.ssl.keystore.location=/tmp/kafka/custom-oauth-9093.keystore.p12 -listener.name.oauth-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.oauth-9093.ssl.keystore.type=PKCS12 - - -principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder - -########## -# Common listener configuration -########## -listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,MTLS-9094://0.0.0.0:19094,OAUTH-9093://0.0.0.0:19093,PLAIN-9092://0.0.0.0:19092 -advertised.listeners=CONTROLPLANE-9090://kafka-1:9090,REPLICATION-9091://kafka-1:9091,MTLS-9094://${STRIMZI_MTLS_9094_ADVERTISED_HOSTNAME}:${STRIMZI_MTLS_9094_ADVERTISED_PORT},OAUTH-9093://${STRIMZI_OAUTH_9093_ADVERTISED_HOSTNAME}:${STRIMZI_OAUTH_9093_ADVERTISED_PORT},PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT} -listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,MTLS-9094:SSL,OAUTH-9093:SASL_SSL,PLAIN-9092:PLAINTEXT -inter.broker.listener.name=REPLICATION-9091 -sasl.enabled.mechanisms= -ssl.secure.random.implementation=SHA1PRNG -ssl.endpoint.identification.algorithm=HTTPS - -########## -# Authorization -########## -allow.everyone.if.no.acl.found=true -authorizer.class.name=kafka.security.authorizer.AclAuthorizer -super.users=User:CN=dapr,O=Dapr Test - -########## -# User provided configuration -########## -num.partitions=10 -auto.create.topics.enable=true -group.initial.rebalance.delay.ms=0 -offsets.topic.replication.factor=3 -inter.broker.protocol.version=3.0 -log.message.format.version=3.0 diff --git a/tests/certification/bindings/kafka/strimzi-kafka2-config/advertised-hostnames.config b/tests/certification/bindings/kafka/strimzi-kafka2-config/advertised-hostnames.config deleted file mode 100644 index ca1b4d059..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka2-config/advertised-hostnames.config +++ /dev/null @@ -1 +0,0 @@ -PLAIN_9092_2://localhost MTLS_9094_2://localhost OAUTH_9093_2://localhost diff --git a/tests/certification/bindings/kafka/strimzi-kafka2-config/advertised-ports.config b/tests/certification/bindings/kafka/strimzi-kafka2-config/advertised-ports.config deleted file mode 100644 index 026044053..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka2-config/advertised-ports.config +++ /dev/null @@ -1 +0,0 @@ -PLAIN_9092_2://29092 MTLS_9094_2://29094 OAUTH_9093_2://29093 diff --git a/tests/certification/bindings/kafka/strimzi-kafka2-config/listeners.config b/tests/certification/bindings/kafka/strimzi-kafka2-config/listeners.config deleted file mode 100644 index 301723f8a..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka2-config/listeners.config +++ /dev/null @@ -1 +0,0 @@ -PLAIN_9092 MTLS_9094 OAUTH_9093 diff --git a/tests/certification/bindings/kafka/strimzi-kafka2-config/log4j.properties b/tests/certification/bindings/kafka/strimzi-kafka2-config/log4j.properties deleted file mode 100644 index a9e2ba234..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka2-config/log4j.properties +++ 
/dev/null @@ -1,18 +0,0 @@ -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n -kafka.root.logger.level=INFO -log4j.rootLogger=${kafka.root.logger.level}, CONSOLE -log4j.logger.org.I0Itec.zkclient.ZkClient=INFO -log4j.logger.org.apache.zookeeper=INFO -log4j.logger.kafka=INFO -log4j.logger.org.apache.kafka=INFO -log4j.logger.kafka.request.logger=WARN, CONSOLE -log4j.logger.kafka.network.Processor=OFF -log4j.logger.kafka.server.KafkaApis=OFF -log4j.logger.kafka.network.RequestChannel$=WARN -log4j.logger.kafka.controller=TRACE -log4j.logger.kafka.log.LogCleaner=INFO -log4j.logger.state.change.logger=TRACE -log4j.logger.kafka.authorizer.logger=INFO - diff --git a/tests/certification/bindings/kafka/strimzi-kafka2-config/server.config b/tests/certification/bindings/kafka/strimzi-kafka2-config/server.config deleted file mode 100644 index fc6278178..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka2-config/server.config +++ /dev/null @@ -1,101 +0,0 @@ -############################## -############################## -# This file is automatically generated by the Strimzi Cluster Operator -# Any changes to this file will be ignored and overwritten! -############################## -############################## - -########## -# Broker ID -########## -broker.id=${STRIMZI_BROKER_ID} - -########## -# Zookeeper -########## -zookeeper.connect=zookeeper:2181 -zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -zookeeper.ssl.client.enable=false - -########## -# Kafka message logs configuration -########## -log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID} - -########## -# Control Plane listener -########## -listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 -listener.name.controlplane-9090.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.controlplane-9090.ssl.keystore.type=PKCS12 -listener.name.controlplane-9090.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12 -listener.name.controlplane-9090.ssl.truststore.password=${CERTS_STORE_PASSWORD} -listener.name.controlplane-9090.ssl.truststore.type=PKCS12 -listener.name.controlplane-9090.ssl.client.auth=required - -########## -# Replication listener -########## -listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 -listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.replication-9091.ssl.keystore.type=PKCS12 -listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12 -listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD} -listener.name.replication-9091.ssl.truststore.type=PKCS12 -listener.name.replication-9091.ssl.client.auth=required - -########## -# Listener configuration: MTLS-9094 -########## -listener.name.mtls-9094.ssl.client.auth=required -listener.name.mtls-9094.ssl.truststore.location=/tmp/kafka/clients.truststore.p12 -listener.name.mtls-9094.ssl.truststore.password=${CERTS_STORE_PASSWORD} -listener.name.mtls-9094.ssl.truststore.type=PKCS12 - -listener.name.mtls-9094.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 -listener.name.mtls-9094.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.mtls-9094.ssl.keystore.type=PKCS12 - - -########## -# Listener configuration: OAUTH-9093 -########## 
-listener.name.oauth-9093.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler -listener.name.oauth-9093.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub="admin" oauth.client.id="kafka" oauth.valid.issuer.uri="https://hydra:4443/" oauth.introspection.endpoint.uri="https://hydra:4444/oauth2/introspect" oauth.username.claim="sub" oauth.client.secret="dapr-test" oauth.ssl.truststore.location="/tmp/kafka/oauth-oauth-9093.truststore.p12" oauth.ssl.truststore.password="${CERTS_STORE_PASSWORD}" oauth.ssl.truststore.type="PKCS12"; -listener.name.oauth-9093.sasl.enabled.mechanisms=OAUTHBEARER -listener.name.oauth-9093.connections.max.reauth.ms=1800000 - -listener.name.oauth-9093.ssl.keystore.location=/tmp/kafka/custom-oauth-9093.keystore.p12 -listener.name.oauth-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.oauth-9093.ssl.keystore.type=PKCS12 - - -principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder - -########## -# Common listener configuration -########## -listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,MTLS-9094://0.0.0.0:29094,OAUTH-9093://0.0.0.0:29093,PLAIN-9092://0.0.0.0:29092 -advertised.listeners=CONTROLPLANE-9090://kafka-2:9090,REPLICATION-9091://kafka-2:9091,MTLS-9094://${STRIMZI_MTLS_9094_ADVERTISED_HOSTNAME}:${STRIMZI_MTLS_9094_ADVERTISED_PORT},OAUTH-9093://${STRIMZI_OAUTH_9093_ADVERTISED_HOSTNAME}:${STRIMZI_OAUTH_9093_ADVERTISED_PORT},PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT} -listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,MTLS-9094:SSL,OAUTH-9093:SASL_SSL,PLAIN-9092:PLAINTEXT -inter.broker.listener.name=REPLICATION-9091 -sasl.enabled.mechanisms= -ssl.secure.random.implementation=SHA1PRNG -ssl.endpoint.identification.algorithm=HTTPS - -########## -# Authorization -########## -allow.everyone.if.no.acl.found=true -authorizer.class.name=kafka.security.authorizer.AclAuthorizer -super.users=User:CN=dapr,O=Dapr Test - -########## -# User provided configuration -########## -num.partitions=10 -auto.create.topics.enable=true -group.initial.rebalance.delay.ms=0 -offsets.topic.replication.factor=3 -inter.broker.protocol.version=3.0 -log.message.format.version=3.0 diff --git a/tests/certification/bindings/kafka/strimzi-kafka3-config/advertised-hostnames.config b/tests/certification/bindings/kafka/strimzi-kafka3-config/advertised-hostnames.config deleted file mode 100644 index 505007fb2..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka3-config/advertised-hostnames.config +++ /dev/null @@ -1 +0,0 @@ -PLAIN_9092_3://localhost MTLS_9094_3://localhost OAUTH_9093_3://localhost diff --git a/tests/certification/bindings/kafka/strimzi-kafka3-config/advertised-ports.config b/tests/certification/bindings/kafka/strimzi-kafka3-config/advertised-ports.config deleted file mode 100644 index 696a21de7..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka3-config/advertised-ports.config +++ /dev/null @@ -1 +0,0 @@ -PLAIN_9092_3://39092 MTLS_9094_3://39094 OAUTH_9093_3://39093 diff --git a/tests/certification/bindings/kafka/strimzi-kafka3-config/listeners.config b/tests/certification/bindings/kafka/strimzi-kafka3-config/listeners.config deleted file mode 100644 index 301723f8a..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka3-config/listeners.config +++ /dev/null @@ -1 
+0,0 @@ -PLAIN_9092 MTLS_9094 OAUTH_9093 diff --git a/tests/certification/bindings/kafka/strimzi-kafka3-config/log4j.properties b/tests/certification/bindings/kafka/strimzi-kafka3-config/log4j.properties deleted file mode 100644 index a9e2ba234..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka3-config/log4j.properties +++ /dev/null @@ -1,18 +0,0 @@ -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n -kafka.root.logger.level=INFO -log4j.rootLogger=${kafka.root.logger.level}, CONSOLE -log4j.logger.org.I0Itec.zkclient.ZkClient=INFO -log4j.logger.org.apache.zookeeper=INFO -log4j.logger.kafka=INFO -log4j.logger.org.apache.kafka=INFO -log4j.logger.kafka.request.logger=WARN, CONSOLE -log4j.logger.kafka.network.Processor=OFF -log4j.logger.kafka.server.KafkaApis=OFF -log4j.logger.kafka.network.RequestChannel$=WARN -log4j.logger.kafka.controller=TRACE -log4j.logger.kafka.log.LogCleaner=INFO -log4j.logger.state.change.logger=TRACE -log4j.logger.kafka.authorizer.logger=INFO - diff --git a/tests/certification/bindings/kafka/strimzi-kafka3-config/server.config b/tests/certification/bindings/kafka/strimzi-kafka3-config/server.config deleted file mode 100644 index 9c3aa3d07..000000000 --- a/tests/certification/bindings/kafka/strimzi-kafka3-config/server.config +++ /dev/null @@ -1,101 +0,0 @@ -############################## -############################## -# This file is automatically generated by the Strimzi Cluster Operator -# Any changes to this file will be ignored and overwritten! -############################## -############################## - -########## -# Broker ID -########## -broker.id=${STRIMZI_BROKER_ID} - -########## -# Zookeeper -########## -zookeeper.connect=zookeeper:2181 -zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -zookeeper.ssl.client.enable=false - -########## -# Kafka message logs configuration -########## -log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID} - -########## -# Control Plane listener -########## -listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 -listener.name.controlplane-9090.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.controlplane-9090.ssl.keystore.type=PKCS12 -listener.name.controlplane-9090.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12 -listener.name.controlplane-9090.ssl.truststore.password=${CERTS_STORE_PASSWORD} -listener.name.controlplane-9090.ssl.truststore.type=PKCS12 -listener.name.controlplane-9090.ssl.client.auth=required - -########## -# Replication listener -########## -listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 -listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.replication-9091.ssl.keystore.type=PKCS12 -listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12 -listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD} -listener.name.replication-9091.ssl.truststore.type=PKCS12 -listener.name.replication-9091.ssl.client.auth=required - -########## -# Listener configuration: MTLS-9094 -########## -listener.name.mtls-9094.ssl.client.auth=required -listener.name.mtls-9094.ssl.truststore.location=/tmp/kafka/clients.truststore.p12 -listener.name.mtls-9094.ssl.truststore.password=${CERTS_STORE_PASSWORD} -listener.name.mtls-9094.ssl.truststore.type=PKCS12 - 
-listener.name.mtls-9094.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 -listener.name.mtls-9094.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.mtls-9094.ssl.keystore.type=PKCS12 - - -########## -# Listener configuration: OAUTH-9093 -########## -listener.name.oauth-9093.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler -listener.name.oauth-9093.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub="admin" oauth.client.id="kafka" oauth.valid.issuer.uri="https://hydra:4443/" oauth.introspection.endpoint.uri="https://hydra:4444/oauth2/introspect" oauth.username.claim="sub" oauth.client.secret="dapr-test" oauth.ssl.truststore.location="/tmp/kafka/oauth-oauth-9093.truststore.p12" oauth.ssl.truststore.password="${CERTS_STORE_PASSWORD}" oauth.ssl.truststore.type="PKCS12"; -listener.name.oauth-9093.sasl.enabled.mechanisms=OAUTHBEARER -listener.name.oauth-9093.connections.max.reauth.ms=1800000 - -listener.name.oauth-9093.ssl.keystore.location=/tmp/kafka/custom-oauth-9093.keystore.p12 -listener.name.oauth-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD} -listener.name.oauth-9093.ssl.keystore.type=PKCS12 - - -principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder - -########## -# Common listener configuration -########## -listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,MTLS-9094://0.0.0.0:39094,OAUTH-9093://0.0.0.0:39093,PLAIN-9092://0.0.0.0:39092 -advertised.listeners=CONTROLPLANE-9090://kafka-3:9090,REPLICATION-9091://kafka-3:9091,MTLS-9094://${STRIMZI_MTLS_9094_ADVERTISED_HOSTNAME}:${STRIMZI_MTLS_9094_ADVERTISED_PORT},OAUTH-9093://${STRIMZI_OAUTH_9093_ADVERTISED_HOSTNAME}:${STRIMZI_OAUTH_9093_ADVERTISED_PORT},PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT} -listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,MTLS-9094:SSL,OAUTH-9093:SASL_SSL,PLAIN-9092:PLAINTEXT -inter.broker.listener.name=REPLICATION-9091 -sasl.enabled.mechanisms= -ssl.secure.random.implementation=SHA1PRNG -ssl.endpoint.identification.algorithm=HTTPS - -########## -# Authorization -########## -allow.everyone.if.no.acl.found=true -authorizer.class.name=kafka.security.authorizer.AclAuthorizer -super.users=User:CN=dapr,O=Dapr Test - -########## -# User provided configuration -########## -num.partitions=10 -auto.create.topics.enable=true -group.initial.rebalance.delay.ms=0 -offsets.topic.replication.factor=3 -inter.broker.protocol.version=3.0 -log.message.format.version=3.0 diff --git a/tests/certification/bindings/kafka/strimzi-listener-certs/ca.crt b/tests/certification/bindings/kafka/strimzi-listener-certs/ca.crt deleted file mode 100644 index 12d18e49c..000000000 --- a/tests/certification/bindings/kafka/strimzi-listener-certs/ca.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL -BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew -HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy -IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk -I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL -NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK -jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF 
-bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm -b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg -lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq -A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec -z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w -nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug -rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc -MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST ------END CERTIFICATE----- diff --git a/tests/certification/bindings/kafka/strimzi-listener-certs/dapr-ca-0/tls.crt b/tests/certification/bindings/kafka/strimzi-listener-certs/dapr-ca-0/tls.crt deleted file mode 100644 index 12d18e49c..000000000 --- a/tests/certification/bindings/kafka/strimzi-listener-certs/dapr-ca-0/tls.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL -BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew -HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy -IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk -I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL -NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK -jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF -bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm -b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg -lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq -A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec -z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w -nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug -rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc -MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST ------END CERTIFICATE----- diff --git a/tests/certification/bindings/kafka/strimzi-listener-certs/tls.crt b/tests/certification/bindings/kafka/strimzi-listener-certs/tls.crt deleted file mode 100644 index e7aaaa636..000000000 --- a/tests/certification/bindings/kafka/strimzi-listener-certs/tls.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDmzCCAoOgAwIBAgIUbM8Fssal+HxhavPplrJ1o4Fk/6kwDQYJKoZIhvcNAQEL -BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew -HhcNMjExMjA0MTYzMjAwWhcNMjIxMjA0MTYzMjAwWjAjMRIwEAYDVQQKEwlEYXBy -IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQC7jpeZVmiNi1I91j6Z7Z5W8z3MCuquNjConG2NjxyT7klQYFMAMlQ0j5v9 -x5hUQ6ks4JTmCBaI/gPtjDJypCPwQKtr9QIECWjM1tSSOs/lu5p9Fqd30klcivF9 -fEpuyui6KpRVobGdg8bZ27Mh4yee1fI1DhAj5ME6Ti3sLmA5uxRYLLPollNICgUs -QME2iJrm30rUmSqbKpB721ULcB7kLTn3PPqMDU3qmXLTTlioN3+hXuC0aSS5c/6f -IwHQ/l2bLApCF9rLc+bkSFBBMOEZD/iomaE7JolHGUt7vEhObSxgnJ6ZH0C+k0Y/ -RLdG9cmmrdIP6SHy8UYX4O0UsHxPAgMBAAGjgb4wgbswDgYDVR0PAQH/BAQDAgWg -MBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFLpv -SEQ21Za3JzU4rosvyiFHfM5VMB8GA1UdIwQYMBaAFH6X+clU0D49SZ9ezWRglsF8 -3glvMEYGA1UdEQQ/MD2CBGRhcHKCCWxvY2FsaG9zdIIHa2Fma2EtMYIHa2Fma2Et -MoIHa2Fma2EtM4IPa2FmYWstYm9vdHN0cmFwMA0GCSqGSIb3DQEBCwUAA4IBAQC1 -pNBOCYLN0CA3VZO+Pz+b7XnAUAZFisgH2/eBQBPOU9b4ImCLIASBqAMEJ4uq//MW -IiF9iu1YcncXPP/8rPnEsxKLBVcjWH48PneBcW3qb/9he+xXCnB7fqMUDkggzTeH 
-4ouZyY5/GnlQYgmFNgOIyI4pydoD8GkJQh88LzEn/YAKi0aVkwBMJ2eb2Fiz05WW
-TKqWZKNnOjLPz5fIYNCR+uZtuqADhqItyaBa+X9NVIQ9cPcPMohZS4o+DtrCQevf
-6QZuQEYh3IIY8Smi4+2OKUE0Gy2AnEKaEdwxbrCKYhuF/sUrLm76LmIH75HQJxyM
-zE20cNgzX3yurenT3tbN
------END CERTIFICATE-----
diff --git a/tests/certification/bindings/kafka/strimzi-listener-certs/tls.key b/tests/certification/bindings/kafka/strimzi-listener-certs/tls.key
deleted file mode 100644
index 04f4f2e5a..000000000
--- a/tests/certification/bindings/kafka/strimzi-listener-certs/tls.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEAu46XmVZojYtSPdY+me2eVvM9zArqrjYwqJxtjY8ck+5JUGBT
-ADJUNI+b/ceYVEOpLOCU5ggWiP4D7YwycqQj8ECra/UCBAlozNbUkjrP5buafRan
-d9JJXIrxfXxKbsrouiqUVaGxnYPG2duzIeMnntXyNQ4QI+TBOk4t7C5gObsUWCyz
-6JZTSAoFLEDBNoia5t9K1JkqmyqQe9tVC3Ae5C059zz6jA1N6ply005YqDd/oV7g
-tGkkuXP+nyMB0P5dmywKQhfay3Pm5EhQQTDhGQ/4qJmhOyaJRxlLe7xITm0sYJye
-mR9AvpNGP0S3RvXJpq3SD+kh8vFGF+DtFLB8TwIDAQABAoIBAEchmh8eZUKhPwyS
-r2VDeBSz5ZD35u8xQB0CTo4sY4M7EPT5wyDE9aiFenyx8PSsQIHznqTrFljYNXcm
-/47472RTsm+cGSqcwvFE3JOk2GnhT4L3T4Yi6428aD/WHoiMTd0k/uLHEwyRCJ5h
-Mzu74a/cpiI29ioWvK23LrVvFTFvOro3UgJvyK4HUS/bg5gnjqMnh87eIrLhbpNI
-0zuoRKcAVIeQWOQ2CvfRAMmijrlp+VLovIqjn/xspvFwCYPMA2ocfjxOC/n6F7/4
-jc8+/Q46xYO+3+1svU2cH0ptyxibk24Iqr+yTtMAx7gs4t7hIOYtRMAw4qCMxHyW
-/hpc3OECgYEA0hd2q2gnadEOIAcKlWrAV772yXyqHKybezbvZHW/U8jOULLZGnpS
-sddCxHE6x8nxmf3YnO3QTYYLTeY9XdlR9Z5Xydu0HzZeGJIxd3wSGZ2hTz4WgbVn
-86JpikQBISW2/6T3MKFDsxhbLmivBrVdjVV1/TRM+UG5YL3bb0/wyz8CgYEA5IqK
-AoJ+zaMGkt6HD4GM7XxSuhICbCITrV5Obkxh17tLguYuAME/WdGbIISrcS/B41KR
-YkJWHMuvGxjW3GF/chcith4k8VDb51Pov1TqvelVDywSOIROUfht6NhtPYajaIGj
-GAC5oYOoQpfH7m5ubmbYh1ueb+POfO3hKtIzWvECgYEAkUTwJW2Lczu+zJ6RzudV
-wFanRoMRDWq8x+IgfhJ9DW4YWuyP+iMC8z2pSTQSNPuKN7SzBy/ZjQFW57KAVFhk
-t7WZdlaYocxyHANaeQgta9D3LVf9MAtDqc9vss97CHSPqQ1kbxfTPA9nXRu9iqH1
-4jhpsX9sih3MFPyysrFQCvkCgYEAgTjUUBb5G8zSKrkoJNxbkux42jzUoc+i0KRC
-NJt7tz9vstPzrvmVmHOsAvcA+T7HooFNMwHPLvj8SZYB5xo5tYjfV5ozyT6vGF2Z
-fJXHJRqJvcptgwdMQYz2mHHHUsKOIskqLqg6TdjjisPHiElop4P/aomjTCDC4GCg
-sFWqNAECgYEAzOQT86+Rz9NdVfDo/C/IqsMK9fmwMjGXFaBnVBuKMv1VBrh4Zh3g
-E8QrdKwf/pxR/G91O2dBXmqhUKFX+cEgtEvqhdCyVdR4jQhAKE4hsDd2q4psqbB2
-BUqaBzo3GawDeKcwCqSPLu7tBKFJCEWjkQZhIVB1lZ8d30i2LSVv2NM=
------END RSA PRIVATE KEY-----
diff --git a/tests/certification/pubsub/kafka/docker-compose.yml b/tests/certification/pubsub/kafka/docker-compose.yml
index 057030d49..4e4f62a0a 100644
--- a/tests/certification/pubsub/kafka/docker-compose.yml
+++ b/tests/certification/pubsub/kafka/docker-compose.yml
@@ -1,68 +1,68 @@
-version: "3.7"
-services:
-  zookeeper:
-    image: confluentinc/cp-zookeeper:5.4.0
-    hostname: zookeeper
-    container_name: zookeeper
-    ports:
-      - "2181:2181"
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-
-  kafka1:
-    image: confluentinc/cp-server:5.4.0
-    hostname: kafka1
-    container_name: kafka1
-    depends_on:
-      - zookeeper
-    ports:
-      - "19092:19092"
-    environment:
-      KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092,PLAINTEXT_HOST://localhost:19092
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
-      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
-      KAFKA_NUM_PARTITIONS: 10
-
-  kafka2:
-    image: confluentinc/cp-server:5.4.0
-    hostname: kafka2
-    container_name: kafka2
-    depends_on:
-      - zookeeper
-    ports:
-      - "29092:29092"
-    environment:
-      KAFKA_BROKER_ID: 2
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092,PLAINTEXT_HOST://localhost:29092
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
-      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
-      KAFKA_NUM_PARTITIONS: 10
-
-  kafka3:
-    image: confluentinc/cp-server:5.4.0
-    hostname: kafka3
-    container_name: kafka3
-    depends_on:
-      - zookeeper
-    ports:
-      - "39092:39092"
-    environment:
-      KAFKA_BROKER_ID: 3
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092,PLAINTEXT_HOST://localhost:39092
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
-      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
+version: "3.7"
+services:
+  zookeeper:
+    image: confluentinc/cp-zookeeper:7.3.0
+    hostname: zookeeper
+    container_name: zookeeper
+    ports:
+      - "2181:2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
+      ZOOKEEPER_TICK_TIME: 2000
+
+  kafka1:
+    image: confluentinc/cp-server:7.3.0
+    hostname: kafka1
+    container_name: kafka1
+    depends_on:
+      - zookeeper
+    ports:
+      - "19092:19092"
+    environment:
+      KAFKA_BROKER_ID: 1
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092,PLAINTEXT_HOST://localhost:19092
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      KAFKA_NUM_PARTITIONS: 10
+
+  kafka2:
+    image: confluentinc/cp-server:7.3.0
+    hostname: kafka2
+    container_name: kafka2
+    depends_on:
+      - zookeeper
+    ports:
+      - "29092:29092"
+    environment:
+      KAFKA_BROKER_ID: 2
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092,PLAINTEXT_HOST://localhost:29092
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      KAFKA_NUM_PARTITIONS: 10
+
+  kafka3:
+    image: confluentinc/cp-server:7.3.0
+    hostname: kafka3
+    container_name: kafka3
+    depends_on:
+      - zookeeper
+    ports:
+      - "39092:39092"
+    environment:
+      KAFKA_BROKER_ID: 3
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092,PLAINTEXT_HOST://localhost:39092
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
       KAFKA_NUM_PARTITIONS: 10
\ No newline at end of file
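The refreshed compose file keeps the same topology as before, one ZooKeeper node and three brokers advertised to the host on ports 19092, 29092, and 39092; the only substantive change is the bump of the Confluent Platform images from 5.4.0 to 7.3.0. Below is a minimal produce/consume round-trip against this cluster, again sketched with sarama; the topic name, payload, and import path are illustrative assumptions, not the certification test itself.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	brokers := []string{"localhost:19092", "localhost:29092", "localhost:39092"}

	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by SyncProducer

	// Publish one message; KAFKA_AUTO_CREATE_TOPICS_ENABLE is 'true' above, so
	// the topic is created on first use with KAFKA_NUM_PARTITIONS (10) partitions.
	producer, err := sarama.NewSyncProducer(brokers, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "neworder", // assumed topic name for this sketch
		Value: sarama.StringEncoder("ping"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("produced to partition %d at offset %d", partition, offset)

	// Read the same message back from the partition it landed on.
	consumer, err := sarama.NewConsumer(brokers, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("neworder", partition, offset)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	msg := <-pc.Messages()
	log.Printf("consumed: %s", string(msg.Value))
}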