update test suite
parent 822726d400
commit 2feda257dd
@@ -36,7 +36,7 @@ RUN go generate && CGO_ENABLED=0 GOOS=linux go build -o /usr/bin/spark-operator
FROM ${SPARK_IMAGE}
COPY --from=builder /usr/bin/spark-operator /usr/bin/
USER 185
USER root

# Comment out the following three lines if you do not have a RedHat subscription.
COPY hack/install_packages.sh /
@@ -45,7 +45,7 @@ RUN rm /install_packages.sh
RUN chmod -R u+x /tmp

RUN apk add --no-cache openssl curl tini
COPY hack/gencerts.sh /usr/bin/
COPY entrypoint.sh /usr/bin/
USER 185
ENTRYPOINT ["/usr/bin/entrypoint.sh"]
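Not part of the diff: a minimal sketch of building an operator image from the Dockerfile touched above. Only the SPARK_IMAGE build-arg comes from the diff ("FROM ${SPARK_IMAGE}"); the base image and tag are placeholders.

# Hypothetical build command; <spark-base-image> and the tag are assumptions,
# SPARK_IMAGE is the build-arg consumed by the FROM line above.
docker build --build-arg SPARK_IMAGE=<spark-base-image> -t spark-operator:e2e-test .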
@@ -1,11 +1,25 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
arg1=$(head -2 /opt/spark/credentials | tail -1)
arg2=$(head -3 /opt/spark/credentials | tail -1)
arg3=$(head -1 /opt/spark/credentials | tail -1)

subscription-manager register --username=$arg1 --password=$arg2 --name=docker
subscription-manager attach --pool=$arg3 && \
yum install -y openssl
subscription-manager remove --al
subscription-manager attach --pool=$arg3 && yum install -y openssl
subscription-manager remove --all
subscription-manager unregister
subscription-manager clean
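Not part of the diff: given the head/tail reads above, /opt/spark/credentials is expected to carry the RedHat pool id on line 1, the username on line 2, and the password on line 3. A minimal sketch of producing such a file (all values are placeholders):

# Hypothetical credentials file consumed by install_packages.sh;
# the line order is exactly what the head/tail extraction above relies on.
cat > /opt/spark/credentials <<'EOF'
<pool-id>
<username>
<password>
EOF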
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
#
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SCRIPT=`basename ${BASH_SOURCE[0]}`
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )"
set -e
platforms=("linux:amd64" "darwin:amd64")
for platform in "${platforms[@]}"
do
GOOS="${platform%%:*}"
GOARCH="${platform#*:}"
echo $GOOS
echo $GOARCH
CGO_ENABLED=0 GOOS=$GOOS GOARCH=$GOARCH go build -o sparkctl-${GOOS}-${GOARCH}
done
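Not part of the diff: the loop above cross-compiles sparkctl once per GOOS:GOARCH pair in platforms. A sketch of invoking it, assuming the new file is a build script under the sparkctl directory; its actual path and name are not visible in this extract.

# Hypothetical invocation; emits sparkctl-linux-amd64 and sparkctl-darwin-amd64
# next to the script (path and file name are assumptions).
cd sparkctl
./build.sh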
@@ -31,7 +31,7 @@ import (
)

func getJobStatus(t *testing.T) v1beta1.ApplicationStateType {
    app, err := appFramework.GetSparkApplication(framework.SparkApplicationClient, "default", "spark-pi")
    app, err := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, "spark-pi")
    assert.Equal(t, nil, err)
    return app.Status.AppState.State
}
@@ -40,12 +40,24 @@ func TestSubmitSparkPiYaml(t *testing.T) {
    t.Parallel()

    // Wait for test job to finish. Time out after 90 seconds.
    timeout := 100 * time.Second
    timeout := 300 * time.Second
    interval := 5 * time.Second

    sa, err := appFramework.MakeSparkApplicationFromYaml("../../examples/spark-pi.yaml")
    if appFramework.SparkTestNamespace != "" {
        sa.ObjectMeta.Namespace = appFramework.SparkTestNamespace
    }

    if appFramework.SparkTestServiceAccount != "" {
        sa.Spec.Driver.ServiceAccount = &appFramework.SparkTestServiceAccount
    }

    if appFramework.SparkTestImage != "" {
        sa.Spec.Image = &appFramework.SparkTestImage
    }

    assert.Equal(t, nil, err)
    err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, "default", sa)
    err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, sa)
    assert.Equal(t, nil, err)

    status := getJobStatus(t)
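Not part of the diff: the overrides above assume the target namespace, service account, and Spark image already exist in the cluster. A rough preparation sketch, assuming a namespace spark-test and a service account spark with edit rights; the names and the exact RBAC are placeholders, not taken from the commit.

# Hypothetical prerequisites for the spark-test-* flags registered further down in TestMain.
kubectl create namespace spark-test
kubectl create serviceaccount spark -n spark-test
kubectl create rolebinding spark-edit --clusterrole=edit \
  --serviceaccount=spark-test:spark -n spark-test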
@@ -58,12 +70,12 @@ func TestSubmitSparkPiYaml(t *testing.T) {
        return false, nil
    })

    app, _ := appFramework.GetSparkApplication(framework.SparkApplicationClient, "default", "spark-pi")
    app, _ := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, "spark-pi")
    podName := app.Status.DriverInfo.PodName
    rawLogs, err := framework.KubeClient.CoreV1().Pods("default").GetLogs(podName, &v1.PodLogOptions{}).Do().Raw()
    rawLogs, err := framework.KubeClient.CoreV1().Pods(appFramework.SparkTestNamespace).GetLogs(podName, &v1.PodLogOptions{}).Do().Raw()
    assert.Equal(t, nil, err)
    assert.NotEqual(t, -1, strings.Index(string(rawLogs), "Pi is roughly 3"))

    err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, "default", "spark-pi")
    err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, "spark-pi")
    assert.Equal(t, nil, err)
}
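Not part of the diff: the assertion above pulls the driver pod's logs through client-go and searches for "Pi is roughly 3". A rough manual equivalent, assuming the driver pod name reported in app.Status.DriverInfo.PodName and a spark-test namespace (both placeholders):

# Hypothetical manual check mirroring the test's log assertion.
kubectl -n spark-test logs <driver-pod-name> | grep "Pi is roughly 3"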
@@ -42,8 +42,12 @@ type Framework struct {
    DefaultTimeout time.Duration
}

var SparkTestNamespace = ""
var SparkTestServiceAccount = ""
var SparkTestImage = ""

// Sets up a test framework and returns it.
func New(ns, kubeconfig, opImage string) (*Framework, error) {
func New(ns, kubeconfig, opImage string, opImagePullPolicy string) (*Framework, error) {
    config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
    if err != nil {
        return nil, errors.Wrap(err, "build config from flags failed")
@@ -72,7 +76,7 @@ func New(ns, kubeconfig, opImage string) (*Framework, error) {
        DefaultTimeout: time.Minute,
    }

    err = f.Setup(opImage)
    err = f.Setup(opImage, opImagePullPolicy)
    if err != nil {
        return nil, errors.Wrap(err, "setup test environment failed")
    }
@@ -80,15 +84,15 @@ func New(ns, kubeconfig, opImage string) (*Framework, error) {
    return f, nil
}

func (f *Framework) Setup(opImage string) error {
    if err := f.setupOperator(opImage); err != nil {
func (f *Framework) Setup(opImage string, opImagePullPolicy string) error {
    if err := f.setupOperator(opImage, opImagePullPolicy); err != nil {
        return errors.Wrap(err, "setup operator failed")
    }

    return nil
}

func (f *Framework) setupOperator(opImage string) error {
func (f *Framework) setupOperator(opImage string, opImagePullPolicy string) error {
    if _, err := CreateServiceAccount(f.KubeClient, f.Namespace.Name, "../../manifest/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) {
        return errors.Wrap(err, "failed to create operator service account")
    }
@@ -111,6 +115,10 @@ func (f *Framework) setupOperator(opImage string) error {
        deploy.Spec.Template.Spec.Containers[0].Image = opImage
    }

    for _, container := range deploy.Spec.Template.Spec.Containers {
        container.ImagePullPolicy = v1.PullPolicy(opImagePullPolicy)
    }

    err = CreateDeployment(f.KubeClient, f.Namespace.Name, deploy)
    if err != nil {
        return err
@@ -30,7 +30,11 @@ var framework *operatorFramework.Framework
func TestMain(m *testing.M) {
    kubeconfig := flag.String("kubeconfig", "", "kube config path, e.g. $HOME/.kube/config")
    opImage := flag.String("operator-image", "", "operator image, e.g. image:tag")
    opImagePullPolicy := flag.String("operator-image-pullPolicy", "IfNotPresent", "pull policy, e.g. Always")
    ns := flag.String("namespace", "spark-operator", "e2e test namespace")
    sparkTestNamespace := flag.String("spark-test-namespace", "default", "e2e test spark-test-namespace")
    sparkTestImage := flag.String("spark-test-image", "", "spark test image, e.g. image:tag")
    sparkTestServiceAccount := flag.String("spark-test-service-account", "default", "e2e test spark test service account")
    flag.Parse()

    if *kubeconfig == "" {
@@ -39,10 +43,13 @@ func TestMain(m *testing.M) {
    }

    var err error
    if framework, err = operatorFramework.New(*ns, *kubeconfig, *opImage); err != nil {
    if framework, err = operatorFramework.New(*ns, *kubeconfig, *opImage, *opImagePullPolicy); err != nil {
        log.Fatalf("failed to set up framework: %v\n", err)
    }

    operatorFramework.SparkTestNamespace = *sparkTestNamespace
    operatorFramework.SparkTestImage = *sparkTestImage
    operatorFramework.SparkTestServiceAccount = *sparkTestServiceAccount
    code := m.Run()

    if err := framework.Teardown(); err != nil {
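Not part of the diff: a sketch of how the flags registered in TestMain above might be passed to the e2e suite. The flag names come from the diff; the package path, image references, and values are assumptions.

# Hypothetical e2e invocation; everything after -args is forwarded to the test
# binary and parsed by flag.Parse() in TestMain. Values in <> are placeholders.
go test ./test/e2e/ -v -args \
  -kubeconfig=$HOME/.kube/config \
  -operator-image=<operator-image:tag> \
  -operator-image-pullPolicy=Always \
  -namespace=spark-operator \
  -spark-test-namespace=spark-test \
  -spark-test-image=<spark-image:tag> \
  -spark-test-service-account=spark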