Add cron support

Yinan Li 2018-04-17 09:55:42 -07:00
parent 3624ad80de
commit eff4c59cb2
50 changed files with 2304 additions and 304 deletions

Gopkg.lock (generated)

@ -3,15 +3,27 @@
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata","iam","internal","internal/optional","internal/version","storage"]
packages = [
"compute/metadata",
"iam",
"internal",
"internal/optional",
"internal/version",
"storage"
]
revision = "767c40d6a2e058483c25fa193e963a22da17236d"
version = "v0.18.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
revision = "c2a68353555b68de3ee8455a4fd3e890a0ac6d99"
version = "v9.8.1"
packages = [
"autorest",
"autorest/adal",
"autorest/azure",
"autorest/date"
]
revision = "fc3b03a2d2d1f43fad3007038bd16f044f870722"
version = "v9.10.0"
[[projects]]
name = "github.com/PuerkitoBio/purell"
@ -34,26 +46,26 @@
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
version = "v3.1.0"
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
branch = "master"
name = "github.com/docker/spdystream"
packages = [".","spdy"]
packages = [
".",
"spdy"
]
revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db"
[[projects]]
name = "github.com/emicklei/go-restful"
packages = [".","log"]
revision = "2dd44038f0b95ae693b266c5f87593b5d2fdd78d"
version = "v2.5.0"
[[projects]]
name = "github.com/emicklei/go-restful-swagger12"
packages = ["."]
revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
version = "1.0.1"
packages = [
".",
"log"
]
revision = "92cd0815dd1a028a6e69faee9757c7436238e252"
version = "v2.6.1"
[[projects]]
name = "github.com/ghodss/yaml"
@ -65,31 +77,34 @@
branch = "master"
name = "github.com/go-openapi/jsonpointer"
packages = ["."]
revision = "779f45308c19820f1a69e9a4cd965f496e0da10f"
revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2"
[[projects]]
branch = "master"
name = "github.com/go-openapi/jsonreference"
packages = ["."]
revision = "36d33bfe519efae5632669801b180bf1a245da3b"
revision = "3fb327e6747da3043567ee86abd02bb6376b6be2"
[[projects]]
branch = "master"
name = "github.com/go-openapi/spec"
packages = ["."]
revision = "fa03337d7da5735229ee8f5e9d5d0b996014b7f8"
revision = "bcff419492eeeb01f76e77d2ebc714dc97b607f5"
[[projects]]
branch = "master"
name = "github.com/go-openapi/swag"
packages = ["."]
revision = "84f4bee7c0a6db40e3166044c7983c1c32125429"
revision = "811b1089cde9dad18d4d0c2d09fbdbf28dbd27a5"
[[projects]]
name = "github.com/gogo/protobuf"
packages = ["proto","sortkeys"]
revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
version = "v0.5"
packages = [
"proto",
"sortkeys"
]
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
branch = "master"
@ -101,13 +116,20 @@
branch = "master"
name = "github.com/golang/groupcache"
packages = ["lru"]
revision = "84a468cf14b4376def5d68c722b139b881c450a4"
revision = "66deaeb636dff1ac7d938ce666d090556056a4b0"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "c65a0412e71e8b9b3bfd22925720d23c0f054237"
packages = [
"proto",
"protoc-gen-go/descriptor",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
[[projects]]
branch = "master"
@ -129,27 +151,45 @@
[[projects]]
name = "github.com/googleapis/gnostic"
packages = ["OpenAPIv2","compiler","extensions"]
packages = [
"OpenAPIv2",
"compiler",
"extensions"
]
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
version = "v0.1.0"
[[projects]]
branch = "master"
name = "github.com/gophercloud/gophercloud"
packages = [".","openstack","openstack/identity/v2/tenants","openstack/identity/v2/tokens","openstack/identity/v3/tokens","openstack/utils","pagination"]
revision = "4a3f5ae58624b68283375060dad06a214b05a32b"
packages = [
".",
"openstack",
"openstack/identity/v2/tenants",
"openstack/identity/v2/tokens",
"openstack/identity/v3/tokens",
"openstack/utils",
"pagination"
]
revision = "b06207adc597d227dd874274305c96e49c3015e3"
[[projects]]
branch = "master"
name = "github.com/gregjones/httpcache"
packages = [".","diskcache"]
revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229"
packages = [
".",
"diskcache"
]
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
[[projects]]
branch = "master"
name = "github.com/hashicorp/golang-lru"
packages = [".","simplelru"]
revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
packages = [
".",
"simplelru"
]
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]]
branch = "master"
@ -160,8 +200,8 @@
[[projects]]
name = "github.com/imdario/mergo"
packages = ["."]
revision = "163f41321a19dd09362d4c63cc2489db2015f1f4"
version = "0.3.2"
revision = "9d5f1277e9a8ed20c3684bda8fde67c05628518c"
version = "v0.3.4"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
@ -172,8 +212,8 @@
[[projects]]
name = "github.com/json-iterator/go"
packages = ["."]
revision = "f7279a603edee96fe7764d3de9c6ff8cf9970994"
version = "1.0.4"
revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4"
version = "1.1.3"
[[projects]]
name = "github.com/juju/ratelimit"
@ -184,8 +224,12 @@
[[projects]]
branch = "master"
name = "github.com/mailru/easyjson"
packages = ["buffer","jlexer","jwriter"]
revision = "32fa128f234d041f196a9f3e0fea5ac9772c08e1"
packages = [
"buffer",
"jlexer",
"jwriter"
]
revision = "8b799c424f57fa123fc63a99d6383bc6e4c02578"
[[projects]]
name = "github.com/mattn/go-runewidth"
@ -193,11 +237,23 @@
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
name = "github.com/modern-go/concurrent"
packages = ["."]
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
name = "github.com/modern-go/reflect2"
packages = ["."]
revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
version = "1.0.0"
[[projects]]
branch = "master"
name = "github.com/olekukonko/tablewriter"
packages = ["."]
revision = "96aac992fc8b1a4c83841a6c3e7178d20d989625"
revision = "b8a9be070da40449e501c3c4730a889e42d87a9e"
[[projects]]
branch = "master"
@ -217,123 +273,409 @@
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/robfig/cron"
packages = ["."]
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"
[[projects]]
name = "github.com/spf13/cobra"
packages = ["."]
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
version = "v0.0.1"
revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
version = "v0.0.2"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
version = "v1.0.0"
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
name = "github.com/stretchr/testify"
packages = ["assert"]
revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c"
version = "v1.2.0"
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
[[projects]]
name = "go.opencensus.io"
packages = [
"exporter/stackdriver/propagation",
"internal",
"internal/tagencoding",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation"
]
revision = "0095aec66ae14801c6711210f6f0716411cefdd3"
version = "v0.8.0"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
revision = "3d37316aaa6bd9929127ac9a527abf408178ea7b"
revision = "d6449816ce06963d9d136eee5a56fca5b0616e7e"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
revision = "0ed95abb35c445290478a5348a7b38bb154135fd"
packages = [
"context",
"context/ctxhttp",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"trace"
]
revision = "8d16fa6dc9a85c1cd3ed24ad08ff21cf94f10888"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "b28fcf2b08a19742b43084fb40ab78ac6c3d8067"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "6881fee410a5daf86371371f9ad451b95e168b71"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix","windows"]
revision = "03467258950d845cd1877eab69461b98e8c09219"
packages = [
"unix",
"windows"
]
revision = "b126b21c05a91c856b027c16779c12e3bf236954"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
"width"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = ["gensupport","googleapi","googleapi/internal/uritemplates","googleapi/transport","internal","iterator","option","storage/v1","transport/http"]
revision = "a27ddaf5ce0ce688be462071a9e55566e60bb59e"
packages = [
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"googleapi/transport",
"internal",
"iterator",
"option",
"storage/v1",
"transport/http"
]
revision = "9c79deebf7496e355d7e95d82d4af1fe4e769b2f"
[[projects]]
name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"urlfetch"
]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status"]
revision = "4eb30f4778eed4c258ba66527a0d4f9ec8a36c45"
packages = [
"googleapis/api/annotations",
"googleapis/iam/v1",
"googleapis/rpc/status"
]
revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","balancer","balancer/base","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
revision = "6b51017f791ae1cfbec89c52efdf444b13b550ef"
version = "v1.9.2"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
revision = "d11072e7ca9811b1100b80ca0269ac831f06d024"
version = "v1.11.3"
[[projects]]
name = "gopkg.in/inf.v0"
packages = ["."]
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
version = "v0.9.0"
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[[projects]]
branch = "release-1.9"
name = "k8s.io/api"
packages = ["admissionregistration/v1alpha1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1beta1"]
revision = "4df58c811fe2e65feb879227b2b245e4dc26e7ad"
version = "kubernetes-1.8.2"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1"
]
revision = "53400f2739169693b7f1f64cf856f6d3d780d3db"
[[projects]]
branch = "release-1.9"
name = "k8s.io/apiextensions-apiserver"
packages = ["pkg/apis/apiextensions","pkg/apis/apiextensions/v1beta1","pkg/client/clientset/clientset","pkg/client/clientset/clientset/fake","pkg/client/clientset/clientset/scheme","pkg/client/clientset/clientset/typed/apiextensions/v1beta1","pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake"]
revision = "e509bb64fe1116e12a32273a2032426aa1a5fd26"
version = "kubernetes-1.8.2"
packages = [
"pkg/apis/apiextensions",
"pkg/apis/apiextensions/v1beta1",
"pkg/client/clientset/clientset",
"pkg/client/clientset/clientset/fake",
"pkg/client/clientset/clientset/scheme",
"pkg/client/clientset/clientset/typed/apiextensions/v1beta1",
"pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake"
]
revision = "9c19edfd52157adc354e1b72596534b47f23cb44"
[[projects]]
branch = "release-1.9"
name = "k8s.io/apimachinery"
packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/netutil","third_party/forked/golang/reflect"]
revision = "019ae5ada31de202164b118aee88ee2d14075c31"
version = "kubernetes-1.8.0"
packages = [
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1alpha1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/httpstream",
"pkg/util/httpstream/spdy",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/strategicpatch",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/netutil",
"third_party/forked/golang/reflect"
]
revision = "4a1945a9cfdfa104d202059c7e64250618d8d009"
[[projects]]
branch = "release-6.0"
name = "k8s.io/client-go"
packages = ["discovery","discovery/fake","kubernetes","kubernetes/fake","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/admissionregistration/v1alpha1/fake","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta1/fake","kubernetes/typed/apps/v1beta2","kubernetes/typed/apps/v1beta2/fake","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1/fake","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authentication/v1beta1/fake","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1/fake","kubernetes/typed/authorization/v1beta1","kubernetes/typed/authorization/v1beta1/fake","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v1/fake","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/autoscaling/v2beta1/fake","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1/fake","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v1beta1/fake","kubernetes/typed/batch/v2alpha1","kubernetes/typed/batch/v2alpha1/fake","kubernetes/typed/certificates/v1beta1","kubernetes/typed/certificates/v1beta1/fake","kubernetes/typed/core/v1","kubernetes/typed/core/v1/fake","kubernetes/typed/extensions/v1beta1","kubernetes/typed/extensions/v1beta1/fake","kubernetes/typed/networking/v1","kubernetes/typed/networking/v1/fake","kubernetes/typed/policy/v1beta1","kubernetes/typed/policy/v1beta1/fake","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1/fake","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1alpha1/fake","kubernetes/typed/rbac/v1beta1","kubernetes/typed/rbac/v1beta1/fake","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/scheduling/v1alpha1/fake","kubernetes/typed/settings/v1alpha1","kubernetes/typed/settings/v1alpha1/fake","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1/fake","kubernetes/typed/storage/v1beta1","kubernetes/typed/storage/v1beta1/fake","pkg/version","plugin/pkg/client/auth","plugin/pkg/client/auth/azure","plugin/pkg/client/auth/gcp","plugin/pkg/client/auth/oidc","plugin/pkg/client/auth/openstack","rest","rest/watch","testing","third_party/forked/golang/template","tools/auth","tools/cache","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/metrics","tools/pager","tools/portforward","tools/record","tools/reference","transport","transport/spdy","util/cert","util/flowcontrol","util/homedir","util/integer","util/jsonpath","util/workqueue"]
revision = "35ccd4336052e7d73018b1382413534936f34eee"
version = "kubernetes-1.8.2"
packages = [
"discovery",
"discovery/fake",
"kubernetes",
"kubernetes/fake",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1alpha1/fake",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/admissionregistration/v1beta1/fake",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1/fake",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta1/fake",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/apps/v1beta2/fake",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1/fake",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authentication/v1beta1/fake",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1/fake",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/authorization/v1beta1/fake",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v1/fake",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/autoscaling/v2beta1/fake",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1/fake",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v1beta1/fake",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/batch/v2alpha1/fake",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/certificates/v1beta1/fake",
"kubernetes/typed/core/v1",
"kubernetes/typed/core/v1/fake",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/events/v1beta1/fake",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/extensions/v1beta1/fake",
"kubernetes/typed/networking/v1",
"kubernetes/typed/networking/v1/fake",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/policy/v1beta1/fake",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1/fake",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1alpha1/fake",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/rbac/v1beta1/fake",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/scheduling/v1alpha1/fake",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/settings/v1alpha1/fake",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1/fake",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1alpha1/fake",
"kubernetes/typed/storage/v1beta1",
"kubernetes/typed/storage/v1beta1/fake",
"pkg/version",
"plugin/pkg/client/auth",
"plugin/pkg/client/auth/azure",
"plugin/pkg/client/auth/gcp",
"plugin/pkg/client/auth/oidc",
"plugin/pkg/client/auth/openstack",
"rest",
"rest/watch",
"testing",
"third_party/forked/golang/template",
"tools/auth",
"tools/cache",
"tools/clientcmd",
"tools/clientcmd/api",
"tools/clientcmd/api/latest",
"tools/clientcmd/api/v1",
"tools/metrics",
"tools/pager",
"tools/portforward",
"tools/record",
"tools/reference",
"transport",
"transport/spdy",
"util/buffer",
"util/cert",
"util/flowcontrol",
"util/homedir",
"util/integer",
"util/jsonpath",
"util/retry",
"util/workqueue"
]
revision = "65b43df093d1d129e9608582d98ac9bfbba7e486"
[[projects]]
branch = "master"
name = "k8s.io/kube-openapi"
packages = ["pkg/common"]
revision = "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
packages = [
"pkg/common",
"pkg/util/proto"
]
revision = "f442ecb314a3679150c272e2b9713d8deed5955d"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "b38ab52c44d1c54760fca3559177d6b68d117b9b37bc950465d0ab3e4b51b5e7"
inputs-digest = "3ab80822024d0dfcc1bb6ef3ae7c320a901d43b06f31fd1594704e6d836185af"
solver-name = "gps-cdcl"
solver-version = 1


@ -55,21 +55,29 @@
name = "golang.org/x/net"
[[constraint]]
branch = "release-1.9"
name = "k8s.io/api"
version = "kubernetes-1.8.2"
[[constraint]]
branch = "release-1.9"
name = "k8s.io/apiextensions-apiserver"
version = "kubernetes-1.8.2"
[[constraint]]
branch = "release-1.9"
name = "k8s.io/apimachinery"
version = "kubernetes-1.8.2"
[[constraint]]
branch = "release-6.0"
name = "k8s.io/client-go"
version = "kubernetes-1.8.2"
[[override]]
name = "github.com/Azure/go-autorest"
version = "v9.1.0"
[[constraint]]
branch = "master"
name = "github.com/olekukonko/tablewriter"
[[constraint]]
name = "github.com/robfig/cron"
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"


@ -48,6 +48,14 @@ The number of worker threads to use in the three places is controlled using command-line flags
`-controller-threads`, `-initializer-threads`, and `-submission-threads`, respectively. The default values for the flags are 10, 10, and 3,
respectively.
Spark Operator enables cache resynchronization, so the informers used by the operator periodically re-list the existing
objects it manages and re-trigger resource events. The resynchronization interval in seconds can be configured using the
flag `-resync-interval`; the default value is 30 seconds.
By default, Spark Operator installs the
[CustomResourceDefinitions](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/)
for the custom resources it manages. This can be disabled by setting the flag `-install-crds=false`.
The initializer is an **optional** component and can be enabled or disabled using the `-enable-initializer` flag, which
defaults to `true`. Since the initializer is an alpha feature, it won't function in Kubernetes clusters without alpha
features enabled. In this case, it can be disabled by adding the argument `-enable-initializer=false` to

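These flags correspond to the definitions this commit adds to main.go (shown later in this diff); a condensed, runnable sketch of how the resync flag becomes an informer resync period:

package main

import (
	"flag"
	"time"
)

var (
	installCRDs    = flag.Bool("install-crds", true, "Whether to install CRDs")
	resyncInterval = flag.Int("resync-interval", 30, "Informer resync interval in seconds")
)

func main() {
	flag.Parse()
	// As in main.go below, the integer flag is converted to a time.Duration
	// before being handed to the shared informer factory.
	resyncPeriod := time.Duration(*resyncInterval) * time.Second
	_, _ = resyncPeriod, *installCRDs
}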

@ -0,0 +1,44 @@
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: "sparkoperator.k8s.io/v1alpha1"
kind: ScheduledSparkApplication
metadata:
name: spark-pi-scheduled
namespace: default
spec:
schedule: "@every 5m"
concurrencyPolicy: Allow
template:
type: Scala
mode: cluster
image: "gcr.io/ynli-k8s/spark:v2.3.0"
mainClass: org.apache.spark.examples.SparkPi
mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.11-2.3.0.jar"
driver:
cores: 0.1
coreLimit: "200m"
memory: "512m"
labels:
version: 2.3.0
serviceAccount: spark
executor:
cores: 1
instances: 1
memory: "512m"
labels:
version: 2.3.0
restartPolicy: Never
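The `schedule` field above is a cron specification; this commit pins `github.com/robfig/cron`, which accepts `@every` descriptors like the one in this example in addition to standard cron fields. A minimal sketch of parsing such a spec and computing the next run time (illustrative, not the controller's actual code):

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron"
)

func main() {
	// robfig/cron understands @every descriptors as well as cron field syntax.
	sched, err := cron.Parse("@every 5m")
	if err != nil {
		panic(err)
	}
	fmt.Println("next run at:", sched.Next(time.Now()))
}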

main.go

@ -29,6 +29,7 @@ import (
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
clientset "k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
@ -36,7 +37,10 @@ import (
crdclientset "k8s.io/spark-on-k8s-operator/pkg/client/clientset/versioned"
crdinformers "k8s.io/spark-on-k8s-operator/pkg/client/informers/externalversions"
"k8s.io/spark-on-k8s-operator/pkg/controller"
"k8s.io/spark-on-k8s-operator/pkg/controller/scheduledsparkapplication"
"k8s.io/spark-on-k8s-operator/pkg/controller/sparkapplication"
ssacrd "k8s.io/spark-on-k8s-operator/pkg/crd/scheduledsparkapplication"
sacrd "k8s.io/spark-on-k8s-operator/pkg/crd/sparkapplication"
"k8s.io/spark-on-k8s-operator/pkg/initializer"
)
@ -47,12 +51,14 @@ var (
"out-of-cluster.")
enableInitializer = flag.Bool("enable-initializer", true, "Whether to enable the "+
"Spark pod initializer.")
installCRDs = flag.Bool("install-crds", true, "Whether to install CRDs")
initializerThreads = flag.Int("initializer-threads", 10, "Number of worker threads "+
"used by the Spark Pod initializer (if it's enabled).")
controllerThreads = flag.Int("controller-threads", 10, "Number of worker threads "+
"used by the SparkApplication controller.")
submissionRunnerThreads = flag.Int("submission-threads", 3, "Number of worker threads "+
"used by the SparkApplication submission runner.")
resyncInterval = flag.Int("resync-interval", 30, "Informer resync interval in seconds")
)
func main() {
@ -86,16 +92,35 @@ func main() {
glog.Fatal(err)
}
if *installCRDs {
glog.Infof("Creating CustomResourceDefinition %s", sacrd.FullName)
err = sacrd.CreateCRD(apiExtensionsClient)
if err != nil {
glog.Fatalf("failed to create CustomResourceDefinition %s: %v", sacrd.FullName, err)
}
glog.Infof("Creating CustomResourceDefinition %s", ssacrd.FullName)
err = ssacrd.CreateCRD(apiExtensionsClient)
if err != nil {
glog.Fatalf("failed to create CustomResourceDefinition %s: %v", ssacrd.FullName, err)
}
}
factory := crdinformers.NewSharedInformerFactory(
crdClient,
// resyncPeriod. Every resyncPeriod, all resources in the cache will re-trigger events.
300*time.Second)
sparkApplicationController := controller.New(crdClient, kubeClient, apiExtensionsClient, factory,
*submissionRunnerThreads)
time.Duration(*resyncInterval)*time.Second)
applicationController := sparkapplication.NewController(
crdClient, kubeClient, apiExtensionsClient, factory, *submissionRunnerThreads)
scheduledApplicationController := scheduledsparkapplication.NewController(
crdClient, kubeClient, apiExtensionsClient, factory, clock.RealClock{})
// Start the informer factory that in turn starts the informer.
go factory.Start(stopCh)
if err = sparkApplicationController.Start(*controllerThreads, stopCh); err != nil {
if err = applicationController.Start(*controllerThreads, stopCh); err != nil {
glog.Fatal(err)
}
if err = scheduledApplicationController.Start(*controllerThreads, stopCh); err != nil {
glog.Fatal(err)
}
@ -115,7 +140,8 @@ func main() {
close(stopCh)
glog.Info("Shutting down the Spark operator")
sparkApplicationController.Stop()
applicationController.Stop()
scheduledApplicationController.Stop()
if *enableInitializer {
sparkPodInitializer.Stop()
}


@ -45,6 +45,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&SparkApplication{},
&SparkApplicationList{},
&ScheduledSparkApplication{},
&ScheduledSparkApplicationList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil


@ -59,6 +59,74 @@ const (
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:defaulter-gen=true
type ScheduledSparkApplication struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec ScheduledSparkApplicationSpec `json:"spec"`
Status ScheduledSparkApplicationStatus `json:"status,omitempty"`
}
type ConcurrencyPolicy string
const (
// ConcurrencyAllow allows SparkApplications to run concurrently.
ConcurrencyAllow ConcurrencyPolicy = "Allow"
// ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous
// one hasn't finished yet.
ConcurrencyForbid ConcurrencyPolicy = "Forbid"
// ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one.
ConcurrencyReplace ConcurrencyPolicy = "Replace"
)
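An illustrative sketch of what the three policies imply for a scheduling loop; startRun, killRun, and runningRuns below are hypothetical helpers, not functions from this commit:

func runPerPolicy(app *ScheduledSparkApplication) {
	switch app.Spec.ConcurrencyPolicy {
	case ConcurrencyAllow:
		// Start a new run regardless of any in-flight run.
		startRun(app)
	case ConcurrencyForbid:
		// Skip this scheduled run if a previous run is still going.
		if len(runningRuns(app)) == 0 {
			startRun(app)
		}
	case ConcurrencyReplace:
		// Terminate in-flight runs, then start the new one.
		for _, r := range runningRuns(app) {
			killRun(r)
		}
		startRun(app)
	}
}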
type ScheduledSparkApplicationSpec struct {
// Schedule is a cron schedule on which the application should run.
Schedule string `json:"schedule"`
// Template is a template from which SparkApplication instances can be created.
Template SparkApplicationSpec `json:"template"`
// Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true.
Suspend *bool `json:"suspend,omitempty"`
// ConcurrencyPolicy is the policy governing concurrent SparkApplication runs.
ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
// RunHistoryLimit specifies the number of past runs of the application to remember.
RunHistoryLimit *int32 `json:"runHistoryLimit,omitempty"`
}
type ScheduleState string
const (
FailedValidationState ScheduleState = "FailedValidation"
ScheduledState ScheduleState = "Scheduled"
)
type ScheduledSparkApplicationStatus struct {
// LastRun is the time when the last run of the application started.
LastRun metav1.Time `json:"lastRun,omitempty"`
// NextRun is the time when the next run of the application will start.
NextRun metav1.Time `json:"nextRun,omitempty"`
// PastRunNames keeps the names of SparkApplications for past runs.
// It keeps up to Spec.RunHistoryLimit number of past SparkApplication names,
// in reverse order of time when the SparkApplications get created.
PastRunNames []string `json:"pastRunNames,omitempty"`
// ScheduleState is the current scheduling state of the application.
ScheduleState ScheduleState `json:"scheduleState,omitempty"`
// Reason tells why the ScheduledSparkApplication is in the particular ScheduleState.
Reason string `json:"reason,omitempty"`
}
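For illustration, a ScheduledSparkApplication equivalent to the YAML example earlier in this commit could be built in Go as follows (a sketch: RunHistoryLimit is optional and added here only for illustration, and the Template fields are elided):

limit := int32(5)
app := &ScheduledSparkApplication{
	ObjectMeta: metav1.ObjectMeta{Name: "spark-pi-scheduled", Namespace: "default"},
	Spec: ScheduledSparkApplicationSpec{
		Schedule:          "@every 5m",
		ConcurrencyPolicy: ConcurrencyAllow,
		RunHistoryLimit:   &limit, // keep the five most recent runs
		Template:          SparkApplicationSpec{ /* driver/executor settings as in the YAML */ },
	},
}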
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ScheduledSparkApplicationList carries a list of ScheduledSparkApplication objects.
type ScheduledSparkApplicationList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ScheduledSparkApplication `json:"items,omitempty"`
}
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:defaulter-gen=true
// SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager.
type SparkApplication struct {
metav1.TypeMeta `json:",inline"`


@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -201,6 +201,125 @@ func (in *NamePath) DeepCopy() *NamePath {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication.
func (in *ScheduledSparkApplication) DeepCopy() *ScheduledSparkApplication {
if in == nil {
return nil
}
out := new(ScheduledSparkApplication)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScheduledSparkApplication) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplicationList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ScheduledSparkApplication, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList.
func (in *ScheduledSparkApplicationList) DeepCopy() *ScheduledSparkApplicationList {
if in == nil {
return nil
}
out := new(ScheduledSparkApplicationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScheduledSparkApplicationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplicationSpec) {
*out = *in
in.Template.DeepCopyInto(&out.Template)
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.RunHistoryLimit != nil {
in, out := &in.RunHistoryLimit, &out.RunHistoryLimit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec.
func (in *ScheduledSparkApplicationSpec) DeepCopy() *ScheduledSparkApplicationSpec {
if in == nil {
return nil
}
out := new(ScheduledSparkApplicationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkApplicationStatus) {
*out = *in
in.LastRun.DeepCopyInto(&out.LastRun)
in.NextRun.DeepCopyInto(&out.NextRun)
if in.PastRunNames != nil {
in, out := &in.PastRunNames, &out.PastRunNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus.
func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplicationStatus {
if in == nil {
return nil
}
out := new(ScheduledSparkApplicationStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretInfo) DeepCopyInto(out *SecretInfo) {
*out = *in
@ -241,9 +360,8 @@ func (in *SparkApplication) DeepCopy() *SparkApplication {
func (in *SparkApplication) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -275,9 +393,8 @@ func (in *SparkApplicationList) DeepCopy() *SparkApplicationList {
func (in *SparkApplicationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
glog "github.com/golang/glog"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@ -72,7 +73,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
glog.Errorf("failed to create the DiscoveryClient: %v", err)
return nil, err
}
return &cs, nil


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated clientset.
package versioned


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated fake clientset.
package fake


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
@ -38,7 +40,7 @@ func init() {
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kuberentes/scheme"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
@ -49,5 +51,4 @@ func init() {
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
sparkoperatorv1alpha1.AddToScheme(scheme)
}


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package contains the scheme of the automatically generated clientset.
package scheme


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
@ -38,7 +40,7 @@ func init() {
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kuberentes/scheme"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
@ -49,5 +51,4 @@ func init() {
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
sparkoperatorv1alpha1.AddToScheme(scheme)
}


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake


@ -0,0 +1,128 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
v1alpha1 "k8s.io/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1"
)
// FakeScheduledSparkApplications implements ScheduledSparkApplicationInterface
type FakeScheduledSparkApplications struct {
Fake *FakeSparkoperatorV1alpha1
ns string
}
var scheduledsparkapplicationsResource = schema.GroupVersionResource{Group: "sparkoperator", Version: "v1alpha1", Resource: "scheduledsparkapplications"}
var scheduledsparkapplicationsKind = schema.GroupVersionKind{Group: "sparkoperator", Version: "v1alpha1", Kind: "ScheduledSparkApplication"}
// Get takes name of the scheduledSparkApplication, and returns the corresponding scheduledSparkApplication object, and an error if there is any.
func (c *FakeScheduledSparkApplications) Get(name string, options v1.GetOptions) (result *v1alpha1.ScheduledSparkApplication, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(scheduledsparkapplicationsResource, c.ns, name), &v1alpha1.ScheduledSparkApplication{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ScheduledSparkApplication), err
}
// List takes label and field selectors, and returns the list of ScheduledSparkApplications that match those selectors.
func (c *FakeScheduledSparkApplications) List(opts v1.ListOptions) (result *v1alpha1.ScheduledSparkApplicationList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(scheduledsparkapplicationsResource, scheduledsparkapplicationsKind, c.ns, opts), &v1alpha1.ScheduledSparkApplicationList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.ScheduledSparkApplicationList{}
for _, item := range obj.(*v1alpha1.ScheduledSparkApplicationList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested scheduledSparkApplications.
func (c *FakeScheduledSparkApplications) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(scheduledsparkapplicationsResource, c.ns, opts))
}
// Create takes the representation of a scheduledSparkApplication and creates it. Returns the server's representation of the scheduledSparkApplication, and an error, if there is any.
func (c *FakeScheduledSparkApplications) Create(scheduledSparkApplication *v1alpha1.ScheduledSparkApplication) (result *v1alpha1.ScheduledSparkApplication, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(scheduledsparkapplicationsResource, c.ns, scheduledSparkApplication), &v1alpha1.ScheduledSparkApplication{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ScheduledSparkApplication), err
}
// Update takes the representation of a scheduledSparkApplication and updates it. Returns the server's representation of the scheduledSparkApplication, and an error, if there is any.
func (c *FakeScheduledSparkApplications) Update(scheduledSparkApplication *v1alpha1.ScheduledSparkApplication) (result *v1alpha1.ScheduledSparkApplication, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(scheduledsparkapplicationsResource, c.ns, scheduledSparkApplication), &v1alpha1.ScheduledSparkApplication{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ScheduledSparkApplication), err
}
// Delete takes name of the scheduledSparkApplication and deletes it. Returns an error if one occurs.
func (c *FakeScheduledSparkApplications) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(scheduledsparkapplicationsResource, c.ns, name), &v1alpha1.ScheduledSparkApplication{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeScheduledSparkApplications) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(scheduledsparkapplicationsResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1alpha1.ScheduledSparkApplicationList{})
return err
}
// Patch applies the patch and returns the patched scheduledSparkApplication.
func (c *FakeScheduledSparkApplications) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ScheduledSparkApplication, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(scheduledsparkapplicationsResource, c.ns, name, data, subresources...), &v1alpha1.ScheduledSparkApplication{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ScheduledSparkApplication), err
}
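Because the fakes above record actions against an in-memory tracker, unit tests can exercise the client without a cluster. Assuming the standard client-gen fake layout (fake.NewSimpleClientset and the SparkoperatorV1alpha1 accessor), a sketch:

client := fake.NewSimpleClientset()
created, err := client.SparkoperatorV1alpha1().
	ScheduledSparkApplications("default").
	Create(&v1alpha1.ScheduledSparkApplication{
		ObjectMeta: v1.ObjectMeta{Name: "spark-pi-scheduled"},
	})
if err != nil {
	// handle error
}
_ = created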


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
@ -26,6 +28,10 @@ type FakeSparkoperatorV1alpha1 struct {
*testing.Fake
}
func (c *FakeSparkoperatorV1alpha1) ScheduledSparkApplications(namespace string) v1alpha1.ScheduledSparkApplicationInterface {
return &FakeScheduledSparkApplications{c, namespace}
}
func (c *FakeSparkoperatorV1alpha1) SparkApplications(namespace string) v1alpha1.SparkApplicationInterface {
return &FakeSparkApplications{c, namespace}
}


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,6 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
type ScheduledSparkApplicationExpansion interface{}
type SparkApplicationExpansion interface{}


@ -0,0 +1,157 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
v1alpha1 "k8s.io/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1"
scheme "k8s.io/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme"
)
// ScheduledSparkApplicationsGetter has a method to return a ScheduledSparkApplicationInterface.
// A group's client should implement this interface.
type ScheduledSparkApplicationsGetter interface {
ScheduledSparkApplications(namespace string) ScheduledSparkApplicationInterface
}
// ScheduledSparkApplicationInterface has methods to work with ScheduledSparkApplication resources.
type ScheduledSparkApplicationInterface interface {
Create(*v1alpha1.ScheduledSparkApplication) (*v1alpha1.ScheduledSparkApplication, error)
Update(*v1alpha1.ScheduledSparkApplication) (*v1alpha1.ScheduledSparkApplication, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha1.ScheduledSparkApplication, error)
List(opts v1.ListOptions) (*v1alpha1.ScheduledSparkApplicationList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ScheduledSparkApplication, err error)
ScheduledSparkApplicationExpansion
}
// scheduledSparkApplications implements ScheduledSparkApplicationInterface
type scheduledSparkApplications struct {
client rest.Interface
ns string
}
// newScheduledSparkApplications returns a ScheduledSparkApplications
func newScheduledSparkApplications(c *SparkoperatorV1alpha1Client, namespace string) *scheduledSparkApplications {
return &scheduledSparkApplications{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the scheduledSparkApplication, and returns the corresponding scheduledSparkApplication object, and an error if there is any.
func (c *scheduledSparkApplications) Get(name string, options v1.GetOptions) (result *v1alpha1.ScheduledSparkApplication, err error) {
result = &v1alpha1.ScheduledSparkApplication{}
err = c.client.Get().
Namespace(c.ns).
Resource("scheduledsparkapplications").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of ScheduledSparkApplications that match those selectors.
func (c *scheduledSparkApplications) List(opts v1.ListOptions) (result *v1alpha1.ScheduledSparkApplicationList, err error) {
result = &v1alpha1.ScheduledSparkApplicationList{}
err = c.client.Get().
Namespace(c.ns).
Resource("scheduledsparkapplications").
VersionedParams(&opts, scheme.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested scheduledSparkApplications.
func (c *scheduledSparkApplications) Watch(opts v1.ListOptions) (watch.Interface, error) {
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("scheduledsparkapplications").
VersionedParams(&opts, scheme.ParameterCodec).
Watch()
}
// Create takes the representation of a scheduledSparkApplication and creates it. Returns the server's representation of the scheduledSparkApplication, and an error, if there is any.
func (c *scheduledSparkApplications) Create(scheduledSparkApplication *v1alpha1.ScheduledSparkApplication) (result *v1alpha1.ScheduledSparkApplication, err error) {
result = &v1alpha1.ScheduledSparkApplication{}
err = c.client.Post().
Namespace(c.ns).
Resource("scheduledsparkapplications").
Body(scheduledSparkApplication).
Do().
Into(result)
return
}
// Update takes the representation of a scheduledSparkApplication and updates it. Returns the server's representation of the scheduledSparkApplication, and an error, if there is any.
func (c *scheduledSparkApplications) Update(scheduledSparkApplication *v1alpha1.ScheduledSparkApplication) (result *v1alpha1.ScheduledSparkApplication, err error) {
result = &v1alpha1.ScheduledSparkApplication{}
err = c.client.Put().
Namespace(c.ns).
Resource("scheduledsparkapplications").
Name(scheduledSparkApplication.Name).
Body(scheduledSparkApplication).
Do().
Into(result)
return
}
// Delete takes name of the scheduledSparkApplication and deletes it. Returns an error if one occurs.
func (c *scheduledSparkApplications) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("scheduledsparkapplications").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *scheduledSparkApplications) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("scheduledsparkapplications").
VersionedParams(&listOptions, scheme.ParameterCodec).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched scheduledSparkApplication.
func (c *scheduledSparkApplications) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ScheduledSparkApplication, err error) {
result = &v1alpha1.ScheduledSparkApplication{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("scheduledsparkapplications").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
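End to end, this typed client is reached through the generated versioned clientset (its NewForConfig appears earlier in this commit). A sketch of listing scheduled applications, assuming the standard clientset accessor name:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	versioned "k8s.io/spark-on-k8s-operator/pkg/client/clientset/versioned"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	crdClient, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	apps, err := crdClient.SparkoperatorV1alpha1().
		ScheduledSparkApplications("default").
		List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, a := range apps.Items {
		fmt.Println(a.Name, a.Status.ScheduleState)
	}
}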


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (


@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
@ -25,6 +27,7 @@ import (
type SparkoperatorV1alpha1Interface interface {
RESTClient() rest.Interface
ScheduledSparkApplicationsGetter
SparkApplicationsGetter
}
@ -33,6 +36,10 @@ type SparkoperatorV1alpha1Client struct {
restClient rest.Interface
}
func (c *SparkoperatorV1alpha1Client) ScheduledSparkApplications(namespace string) ScheduledSparkApplicationInterface {
return newScheduledSparkApplications(c, namespace)
}
func (c *SparkoperatorV1alpha1Client) SparkApplications(namespace string) SparkApplicationInterface {
return newSparkApplications(c, namespace)
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
@ -32,12 +32,16 @@ import (
sparkoperator_k8s_io "k8s.io/spark-on-k8s-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
@ -45,23 +49,62 @@ type sharedInformerFactory struct {
startedInformers map[reflect.Type]bool
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil)
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return &sharedInformerFactory{
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: namespace,
tweakListOptions: tweakListOptions,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
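
A short usage sketch of the options defined above; the namespace and resync values are illustrative, v1 is the meta/v1 alias used in this file, and crdv1alpha1 stands in for the sparkoperator.k8s.io/v1alpha1 API types package.

// Sketch: a namespaced factory with a per-type resync override.
factory := NewSharedInformerFactoryWithOptions(
	client,
	30*time.Second, // default resync for informers without an override
	WithNamespace("spark-jobs"),
	WithCustomResyncConfig(map[v1.Object]time.Duration{
		&crdv1alpha1.ScheduledSparkApplication{}: 5 * time.Minute,
	}),
)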
// Start initializes all requested informers.
@ -110,7 +153,13 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
if exists {
return informer
}
informer = newFunc(f.client, f.defaultResync)
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer

View File

@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
@ -53,6 +53,8 @@ func (f *genericInformer) Lister() cache.GenericLister {
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
// Group=sparkoperator, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("scheduledsparkapplications"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1alpha1().ScheduledSparkApplications().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("sparkapplications"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1alpha1().SparkApplications().Informer()}, nil

View File

@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
// Code generated by informer-gen. DO NOT EDIT.
package internalinterfaces

View File

@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
// Code generated by informer-gen. DO NOT EDIT.
package sparkoperator

View File

@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
@ -24,6 +24,8 @@ import (
// Interface provides access to all the informers in this group version.
type Interface interface {
// ScheduledSparkApplications returns a ScheduledSparkApplicationInformer.
ScheduledSparkApplications() ScheduledSparkApplicationInformer
// SparkApplications returns a SparkApplicationInformer.
SparkApplications() SparkApplicationInformer
}
@ -39,6 +41,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// ScheduledSparkApplications returns a ScheduledSparkApplicationInformer.
func (v *version) ScheduledSparkApplications() ScheduledSparkApplicationInformer {
return &scheduledSparkApplicationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// SparkApplications returns a SparkApplicationInformer.
func (v *version) SparkApplications() SparkApplicationInformer {
return &sparkApplicationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}

View File

@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
sparkoperator_k8s_io_v1alpha1 "k8s.io/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1"
versioned "k8s.io/spark-on-k8s-operator/pkg/client/clientset/versioned"
internalinterfaces "k8s.io/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1alpha1"
)
// ScheduledSparkApplicationInformer provides access to a shared informer and lister for
// ScheduledSparkApplications.
type ScheduledSparkApplicationInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.ScheduledSparkApplicationLister
}
type scheduledSparkApplicationInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewScheduledSparkApplicationInformer constructs a new informer for ScheduledSparkApplication type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewScheduledSparkApplicationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredScheduledSparkApplicationInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredScheduledSparkApplicationInformer constructs a new informer for ScheduledSparkApplication type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredScheduledSparkApplicationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SparkoperatorV1alpha1().ScheduledSparkApplications(namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SparkoperatorV1alpha1().ScheduledSparkApplications(namespace).Watch(options)
},
},
&sparkoperator_k8s_io_v1alpha1.ScheduledSparkApplication{},
resyncPeriod,
indexers,
)
}
func (f *scheduledSparkApplicationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredScheduledSparkApplicationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *scheduledSparkApplicationInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&sparkoperator_k8s_io_v1alpha1.ScheduledSparkApplication{}, f.defaultInformer)
}
func (f *scheduledSparkApplicationInformer) Lister() v1alpha1.ScheduledSparkApplicationLister {
return v1alpha1.NewScheduledSparkApplicationLister(f.Informer().GetIndexer())
}
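
As the constructor comments above recommend, the informer is normally obtained through the shared factory rather than built directly. A minimal sketch, assuming externalversions is the factory package from this commit and the handler bodies are placeholders:

// Sketch: getting the shared informer from the factory and registering handlers.
factory := externalversions.NewSharedInformerFactory(client, 30*time.Second)
informer := factory.Sparkoperator().V1alpha1().ScheduledSparkApplications().Informer()
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc:    func(obj interface{}) { /* enqueue key */ },
	UpdateFunc: func(oldObj, newObj interface{}) { /* enqueue key */ },
	DeleteFunc: func(obj interface{}) { /* forget key */ },
})
stopCh := make(chan struct{})
factory.Start(stopCh) // starts every informer requested so far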

View File

@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1

View File

@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,10 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by lister-gen
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
// ScheduledSparkApplicationListerExpansion allows custom methods to be added to
// ScheduledSparkApplicationLister.
type ScheduledSparkApplicationListerExpansion interface{}
// ScheduledSparkApplicationNamespaceListerExpansion allows custom methods to be added to
// ScheduledSparkApplicationNamespaceLister.
type ScheduledSparkApplicationNamespaceListerExpansion interface{}
// SparkApplicationListerExpansion allows custom methods to be added to
// SparkApplicationLister.
type SparkApplicationListerExpansion interface{}

View File

@ -0,0 +1,94 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
v1alpha1 "k8s.io/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1"
)
// ScheduledSparkApplicationLister helps list ScheduledSparkApplications.
type ScheduledSparkApplicationLister interface {
// List lists all ScheduledSparkApplications in the indexer.
List(selector labels.Selector) (ret []*v1alpha1.ScheduledSparkApplication, err error)
// ScheduledSparkApplications returns an object that can list and get ScheduledSparkApplications.
ScheduledSparkApplications(namespace string) ScheduledSparkApplicationNamespaceLister
ScheduledSparkApplicationListerExpansion
}
// scheduledSparkApplicationLister implements the ScheduledSparkApplicationLister interface.
type scheduledSparkApplicationLister struct {
indexer cache.Indexer
}
// NewScheduledSparkApplicationLister returns a new ScheduledSparkApplicationLister.
func NewScheduledSparkApplicationLister(indexer cache.Indexer) ScheduledSparkApplicationLister {
return &scheduledSparkApplicationLister{indexer: indexer}
}
// List lists all ScheduledSparkApplications in the indexer.
func (s *scheduledSparkApplicationLister) List(selector labels.Selector) (ret []*v1alpha1.ScheduledSparkApplication, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.ScheduledSparkApplication))
})
return ret, err
}
// ScheduledSparkApplications returns an object that can list and get ScheduledSparkApplications.
func (s *scheduledSparkApplicationLister) ScheduledSparkApplications(namespace string) ScheduledSparkApplicationNamespaceLister {
return scheduledSparkApplicationNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// ScheduledSparkApplicationNamespaceLister helps list and get ScheduledSparkApplications.
type ScheduledSparkApplicationNamespaceLister interface {
// List lists all ScheduledSparkApplications in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha1.ScheduledSparkApplication, err error)
// Get retrieves the ScheduledSparkApplication from the indexer for a given namespace and name.
Get(name string) (*v1alpha1.ScheduledSparkApplication, error)
ScheduledSparkApplicationNamespaceListerExpansion
}
// scheduledSparkApplicationNamespaceLister implements the ScheduledSparkApplicationNamespaceLister
// interface.
type scheduledSparkApplicationNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all ScheduledSparkApplications in the indexer for a given namespace.
func (s scheduledSparkApplicationNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ScheduledSparkApplication, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.ScheduledSparkApplication))
})
return ret, err
}
// Get retrieves the ScheduledSparkApplication from the indexer for a given namespace and name.
func (s scheduledSparkApplicationNamespaceLister) Get(name string) (*v1alpha1.ScheduledSparkApplication, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("scheduledsparkapplication"), name)
}
return obj.(*v1alpha1.ScheduledSparkApplication), nil
}
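
Once the informer cache has synced, reads should go through the lister rather than the API server. A brief sketch (the factory variable, namespace, and name are illustrative):

// Sketch: reading ScheduledSparkApplications from the local cache.
lister := factory.Sparkoperator().V1alpha1().ScheduledSparkApplications().Lister()
apps, err := lister.ScheduledSparkApplications("default").List(labels.Everything())
if err != nil {
	glog.Error(err)
}
glog.Infof("cache holds %d ScheduledSparkApplications", len(apps))
if _, err := lister.ScheduledSparkApplications("default").Get("spark-pi-every-5m"); errors.IsNotFound(err) {
	glog.Info("spark-pi-every-5m is not in the cache")
}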

View File

@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by lister-gen
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1

View File

@ -16,12 +16,20 @@ limitations under the License.
package controller
// Package controller implements the CustomResourceDefinition (CRD) controller for SparkApplications.
// The controller is responsible for watching SparkApplication objects and submitting Spark applications
// described by the specs in the objects on behalf of users. After an application is submitted, the
// controller monitors the application state and updates the status field of the SparkApplication object
// accordingly. The controller uses a sparkSubmitRunner to submit applications to run in the Kubernetes
// cluster where Spark Operator runs. The sparkSubmitRunner maintains a set of workers, each of which is
// a goroutine, for actually running the spark-submit commands. The controller also uses a sparkPodMonitor
// to watch Spark driver and executor pods. The sparkPodMonitor sends driver and executor state updates
// to the controller, which then updates status field of SparkApplication objects accordingly.
// Package controller implements the CustomResourceDefinition (CRD) controller for SparkApplications and
// ScheduledSparkApplications.
//
// The ScheduledSparkApplication controller is responsible for watching ScheduledSparkApplication objects
// and scheduling them according to the cron schedule in the ScheduledSparkApplication specification. For
// each ScheduledSparkApplication, the controller creates a new SparkApplication instance when the next run
// of the application is due and the condition for starting the next run is satisfied.
//
// The SparkApplication controller is responsible for watching SparkApplication objects and submitting
// Spark applications described by the specs in the objects on behalf of users. After an application is
// submitted, the controller monitors the application state and updates the status field of the
// SparkApplication object accordingly. The controller uses a sparkSubmitRunner to submit applications
// to run in the Kubernetes cluster where Spark Operator runs. The sparkSubmitRunner maintains a set of
// workers, each of which is a goroutine, for actually running the spark-submit commands. The controller
// also uses a sparkPodMonitor to watch Spark driver and executor pods. The sparkPodMonitor sends driver
// and executor state updates to the controller, which then updates the status field of SparkApplication
// objects accordingly.
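
For illustration, a ScheduledSparkApplication the controller described above acts on might be built as follows; every field value is hypothetical, and the per-run SparkApplication template is elided.

// Sketch: an hourly application that forbids concurrent runs and keeps
// the three most recent SparkApplication objects it created.
var historyLimit int32 = 3
scheduled := &v1alpha1.ScheduledSparkApplication{
	ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "nightly-report"},
	Spec: v1alpha1.ScheduledSparkApplicationSpec{
		Schedule:          "@hourly", // any spec accepted by cron.ParseStandard
		ConcurrencyPolicy: v1alpha1.ConcurrencyForbid,
		RunHistoryLimit:   &historyLimit,
		// Template: v1alpha1.SparkApplicationSpec{...} describing each run.
	},
}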

View File

@ -0,0 +1,381 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduledsparkapplication
import (
"fmt"
"reflect"
"time"
"github.com/golang/glog"
"github.com/robfig/cron"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
"k8s.io/client-go/util/workqueue"
"k8s.io/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1"
crdclientset "k8s.io/spark-on-k8s-operator/pkg/client/clientset/versioned"
crdscheme "k8s.io/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme"
crdinformers "k8s.io/spark-on-k8s-operator/pkg/client/informers/externalversions"
crdlisters "k8s.io/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1alpha1"
)
var (
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
)
type Controller struct {
crdClient crdclientset.Interface
kubeClient kubernetes.Interface
extensionsClient apiextensionsclient.Interface
queue workqueue.RateLimitingInterface
cacheSynced cache.InformerSynced
lister crdlisters.ScheduledSparkApplicationLister
clock clock.Clock
}
func NewController(
crdClient crdclientset.Interface,
kubeClient kubernetes.Interface,
extensionsClient apiextensionsclient.Interface,
informerFactory crdinformers.SharedInformerFactory,
clock clock.Clock) *Controller {
crdscheme.AddToScheme(scheme.Scheme)
queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(),
"scheduled-spark-application-controller")
controller := &Controller{
crdClient: crdClient,
kubeClient: kubeClient,
extensionsClient: extensionsClient,
queue: queue,
clock: clock,
}
informer := informerFactory.Sparkoperator().V1alpha1().ScheduledSparkApplications()
informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.onAdd,
UpdateFunc: controller.onUpdate,
DeleteFunc: controller.onDelete,
})
controller.cacheSynced = informer.Informer().HasSynced
controller.lister = informer.Lister()
return controller
}
func (c *Controller) Start(workers int, stopCh <-chan struct{}) error {
glog.Info("Starting the ScheduledSparkApplication controller")
if !cache.WaitForCacheSync(stopCh, c.cacheSynced) {
return fmt.Errorf("timed out waiting for cache to sync")
}
glog.Info("Starting the workers of the ScheduledSparkApplication controller")
for i := 0; i < workers; i++ {
// runWorker will loop until "something bad" happens. wait.Until will then
// restart the worker after one second.
go wait.Until(c.runWorker, time.Second, stopCh)
}
return nil
}
func (c *Controller) Stop() {
glog.Info("Stopping the ScheduledSparkApplication controller")
c.queue.ShutDown()
}
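
Wiring and running this controller might look roughly like the sketch below; the three clients are assumed to be constructed elsewhere, and the worker count is arbitrary.

// Sketch: constructing and starting the ScheduledSparkApplication controller.
informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 30*time.Second)
controller := NewController(crdClient, kubeClient, extensionsClient, informerFactory, clock.RealClock{})
stopCh := make(chan struct{})
informerFactory.Start(stopCh) // run the informer requested by NewController
if err := controller.Start(3, stopCh); err != nil {
	glog.Fatal(err)
}
defer controller.Stop()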
func (c *Controller) runWorker() {
defer utilruntime.HandleCrash()
for c.processNextItem() {
}
}
func (c *Controller) processNextItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.syncScheduledSparkApplication(key.(string))
if err == nil {
// Successfully processed the key or the key was not found, so tell the queue to stop tracking
// history for this key. This will reset things like failure counts for per-item rate limiting.
c.queue.Forget(key)
return true
}
// There was a failure, so be sure to report it. This method allows for pluggable error handling,
// which can be used for things like cluster monitoring.
utilruntime.HandleError(fmt.Errorf("failed to sync ScheduledSparkApplication %q: %v", key, err))
// Since we failed, requeue the item to work on later. This method adds a backoff
// to avoid hot-looping on particular items (they're probably still not going to work right away)
// and protects the overall controller (if everything is broken, this controller needs to
// calm down or it can starve other useful work).
c.queue.AddRateLimited(key)
return true
}
func (c *Controller) syncScheduledSparkApplication(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
app, err := c.lister.ScheduledSparkApplications(namespace).Get(name)
if err != nil {
return err
}
if app.Spec.Suspend != nil && *app.Spec.Suspend {
return nil
}
glog.V(2).Infof("Syncing ScheduledSparkApplication %s", app.Name)
status := app.Status.DeepCopy()
schedule, err := cron.ParseStandard(app.Spec.Schedule)
if err != nil {
glog.Errorf("failed to parse schedule %s of %s: %v", app.Spec.Schedule, app.Name, err)
status.ScheduleState = v1alpha1.FailedValidationState
status.Reason = err.Error()
} else {
status.ScheduleState = v1alpha1.ScheduledState
nextRunTime := status.NextRun.Time
if nextRunTime.IsZero() {
nextRunTime = schedule.Next(status.LastRun.Time)
status.NextRun = metav1.NewTime(nextRunTime)
}
now := c.clock.Now()
if nextRunTime.Before(now) {
// The next run is due. Check if this is the first run of the application.
if len(status.PastRunNames) == 0 {
// This is the first run of the application.
if err = c.startNextRun(app, status, schedule); err != nil {
return err
}
} else {
// Check if the condition for starting the next run is satisfied.
ok, err := c.shouldStartNextRun(app)
if err != nil {
return err
}
if ok {
if err = c.startNextRun(app, status, schedule); err != nil {
return err
}
}
}
}
}
return c.updateScheduledSparkApplicationStatus(app, status)
}
func (c *Controller) onAdd(obj interface{}) {
c.enqueue(obj)
}
func (c *Controller) onUpdate(oldObj, newObj interface{}) {
c.enqueue(newObj)
}
func (c *Controller) onDelete(obj interface{}) {
c.dequeue(obj)
}
func (c *Controller) enqueue(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
glog.Errorf("failed to get key for %v: %v", obj, err)
return
}
c.queue.AddRateLimited(key)
}
func (c *Controller) dequeue(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
glog.Errorf("failed to get key for %v: %v", obj, err)
return
}
c.queue.Forget(key)
c.queue.Done(key)
}
func (c *Controller) createSparkApplication(
scheduledApp *v1alpha1.ScheduledSparkApplication, t time.Time) (string, error) {
app := &v1alpha1.SparkApplication{}
app.Spec = scheduledApp.Spec.Template
app.Name = fmt.Sprintf("%s-%d", scheduledApp.Name, t.UnixNano())
app.OwnerReferences = append(app.OwnerReferences, metav1.OwnerReference{
APIVersion: v1alpha1.SchemeGroupVersion.String(),
Kind: reflect.TypeOf(v1alpha1.ScheduledSparkApplication{}).Name(),
Name: scheduledApp.Name,
UID: scheduledApp.UID,
})
_, err := c.crdClient.SparkoperatorV1alpha1().SparkApplications(scheduledApp.Namespace).Create(app)
if err != nil {
return "", err
}
return app.Name, nil
}
func (c *Controller) shouldStartNextRun(app *v1alpha1.ScheduledSparkApplication) (bool, error) {
switch app.Spec.ConcurrencyPolicy {
case v1alpha1.ConcurrencyAllow:
return true, nil
case v1alpha1.ConcurrencyForbid:
finished, _, err := c.hasLastRunFinished(app.Namespace, app.Status.PastRunNames[0])
if err != nil {
return false, err
}
return finished, nil
case v1alpha1.ConcurrencyReplace:
if err := c.killLastRunIfNotFinished(app.Namespace, app.Status.PastRunNames[0]); err != nil {
return false, err
}
return true, nil
}
return true, nil
}
func (c *Controller) startNextRun(
app *v1alpha1.ScheduledSparkApplication,
status *v1alpha1.ScheduledSparkApplicationStatus,
schedule cron.Schedule) error {
glog.Infof("Next run of %s is due, creating a new SparkApplication instance", app.Name)
status.LastRun = metav1.Now()
name, err := c.createSparkApplication(app, status.LastRun.Time)
if err != nil {
glog.Errorf("failed to create a SparkApplication instance for %s: %v", app.Name, err)
return err
}
status.NextRun = metav1.NewTime(schedule.Next(status.LastRun.Time))
var limit int32 = 1
if app.Spec.RunHistoryLimit != nil {
limit = *app.Spec.RunHistoryLimit
}
rest := status.PastRunNames
var toDelete []string
if int32(len(status.PastRunNames)) >= limit {
rest = status.PastRunNames[:limit-1]
toDelete = status.PastRunNames[limit-1:]
}
// Prepend the name of the latest run.
status.PastRunNames = append([]string{name}, rest...)
namespace := app.Namespace
// Delete runs that should no longer be kept.
for _, name := range toDelete {
c.crdClient.SparkoperatorV1alpha1().SparkApplications(namespace).Delete(name, metav1.NewDeleteOptions(0))
}
return nil
}
func (c *Controller) hasLastRunFinished(
namespace string,
lastRunName string) (bool, *v1alpha1.SparkApplication, error) {
app, err := c.crdClient.SparkoperatorV1alpha1().SparkApplications(namespace).Get(lastRunName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, nil, nil
}
return false, nil, err
}
return app.Status.AppState.State == v1alpha1.CompletedState ||
app.Status.AppState.State == v1alpha1.FailedState, app, nil
}
func (c *Controller) killLastRunIfNotFinished(namespace string, lastRunName string) error {
finished, app, err := c.hasLastRunFinished(namespace, lastRunName)
if err != nil {
return err
}
if app == nil || finished {
return nil
}
// Delete the driver pod of the last run if applicable.
if app.Status.DriverInfo.PodName != "" {
if err = c.kubeClient.CoreV1().Pods(namespace).Delete(app.Status.DriverInfo.PodName,
metav1.NewDeleteOptions(0)); err != nil {
return err
}
}
// Delete the SparkApplication object of the last run.
if err = c.crdClient.SparkoperatorV1alpha1().SparkApplications(namespace).Delete(lastRunName,
metav1.NewDeleteOptions(0)); err != nil {
return err
}
return nil
}
func (c *Controller) updateScheduledSparkApplicationStatus(
app *v1alpha1.ScheduledSparkApplication,
newStatus *v1alpha1.ScheduledSparkApplicationStatus) error {
// If the status has not changed, do not perform an update.
if isStatusEqual(newStatus, &app.Status) {
return nil
}
toUpdate := app.DeepCopy()
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
toUpdate.Status = *newStatus
_, updateErr := c.crdClient.SparkoperatorV1alpha1().ScheduledSparkApplications(toUpdate.Namespace).Update(
toUpdate)
if updateErr == nil {
return nil
}
result, err := c.crdClient.SparkoperatorV1alpha1().ScheduledSparkApplications(toUpdate.Namespace).Get(
toUpdate.Name, metav1.GetOptions{})
if err != nil {
return err
}
toUpdate = result
return updateErr
})
}
func isStatusEqual(newStatus, currentStatus *v1alpha1.ScheduledSparkApplicationStatus) bool {
return newStatus.ScheduleState == currentStatus.ScheduleState &&
newStatus.LastRun == currentStatus.LastRun &&
newStatus.NextRun == currentStatus.NextRun &&
reflect.DeepEqual(newStatus.PastRunNames, currentStatus.PastRunNames) &&
newStatus.Reason == currentStatus.Reason
}

View File

@ -0,0 +1,218 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduledsparkapplication
import (
"testing"
"time"
"github.com/robfig/cron"
"github.com/stretchr/testify/assert"
apiextensionsfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
kubeclientfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1"
crdclientfake "k8s.io/spark-on-k8s-operator/pkg/client/clientset/versioned/fake"
crdinformers "k8s.io/spark-on-k8s-operator/pkg/client/informers/externalversions"
)
func TestSyncScheduledSparkApplication(t *testing.T) {
var two int32 = 2
app := &v1alpha1.ScheduledSparkApplication{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "test-app",
},
Spec: v1alpha1.ScheduledSparkApplicationSpec{
Schedule: "@every 1m",
ConcurrencyPolicy: v1alpha1.ConcurrencyAllow,
RunHistoryLimit: &two,
},
}
c, informer, clk := newFakeController(app)
key, _ := cache.MetaNamespaceKeyFunc(app)
// This sync should start the first run.
err := c.syncScheduledSparkApplication(key)
if err != nil {
t.Fatal(err)
}
app, _ = c.crdClient.SparkoperatorV1alpha1().ScheduledSparkApplications(app.Namespace).Get(app.Name,
metav1.GetOptions{})
assert.Equal(t, v1alpha1.ScheduledState, app.Status.ScheduleState)
// The first run should have been started.
assert.Equal(t, 1, len(app.Status.PastRunNames))
assert.False(t, app.Status.LastRun.IsZero())
assert.True(t, app.Status.NextRun.After(app.Status.LastRun.Time))
run, _ := c.crdClient.SparkoperatorV1alpha1().SparkApplications(app.Namespace).Get(app.Status.PastRunNames[0],
metav1.GetOptions{})
assert.NotNil(t, run)
informer.GetIndexer().Add(app)
c.crdClient.SparkoperatorV1alpha1().ScheduledSparkApplications(app.Namespace).Update(app)
// This sync should not start any run.
err = c.syncScheduledSparkApplication(key)
if err != nil {
t.Fatal(err)
}
app, _ = c.crdClient.SparkoperatorV1alpha1().ScheduledSparkApplications(app.Namespace).Get(app.Name,
metav1.GetOptions{})
assert.Equal(t, v1alpha1.ScheduledState, app.Status.ScheduleState)
// Next run is not due, so there's still only one past run.
assert.Equal(t, 1, len(app.Status.PastRunNames))
informer.GetIndexer().Add(app)
c.crdClient.SparkoperatorV1alpha1().ScheduledSparkApplications(app.Namespace).Update(app)
// Advance the clock to trigger the next run.
clk.SetTime(app.Status.NextRun.Time.Add(5 * time.Second))
// This sync should start the second run.
err = c.syncScheduledSparkApplication(key)
if err != nil {
t.Fatal(err)
}
app, _ = c.crdClient.SparkoperatorV1alpha1().ScheduledSparkApplications(app.Namespace).Get(app.Name,
metav1.GetOptions{})
assert.Equal(t, v1alpha1.ScheduledState, app.Status.ScheduleState)
// The second run should have been started.
assert.Equal(t, 2, len(app.Status.PastRunNames))
run, _ = c.crdClient.SparkoperatorV1alpha1().SparkApplications(app.Namespace).Get(app.Status.PastRunNames[0],
metav1.GetOptions{})
assert.NotNil(t, run)
run, _ = c.crdClient.SparkoperatorV1alpha1().SparkApplications(app.Namespace).Get(app.Status.PastRunNames[1],
metav1.GetOptions{})
assert.NotNil(t, run)
}
func TestShouldStartNextRun(t *testing.T) {
app := &v1alpha1.ScheduledSparkApplication{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "test-app",
},
Spec: v1alpha1.ScheduledSparkApplicationSpec{
Schedule: "@every 1m",
},
Status: v1alpha1.ScheduledSparkApplicationStatus{
PastRunNames: []string{"run1"},
},
}
c, _, _ := newFakeController(app)
run1 := &v1alpha1.SparkApplication{
ObjectMeta: metav1.ObjectMeta{
Namespace: app.Namespace,
Name: "run1",
},
}
c.crdClient.SparkoperatorV1alpha1().SparkApplications(run1.Namespace).Create(run1)
// ConcurrencyAllow with a running run.
run1.Status.AppState.State = v1alpha1.RunningState
c.crdClient.SparkoperatorV1alpha1().SparkApplications(run1.Namespace).Update(run1)
app.Spec.ConcurrencyPolicy = v1alpha1.ConcurrencyAllow
ok, _ := c.shouldStartNextRun(app)
assert.True(t, ok)
// ConcurrencyForbid with a running run.
app.Spec.ConcurrencyPolicy = v1alpha1.ConcurrencyForbid
ok, _ = c.shouldStartNextRun(app)
assert.False(t, ok)
// ConcurrencyForbid with a completed run.
run1.Status.AppState.State = v1alpha1.CompletedState
c.crdClient.SparkoperatorV1alpha1().SparkApplications(run1.Namespace).Update(run1)
ok, _ = c.shouldStartNextRun(app)
assert.True(t, ok)
// ConcurrencyReplace with a completed run.
app.Spec.ConcurrencyPolicy = v1alpha1.ConcurrencyReplace
ok, _ = c.shouldStartNextRun(app)
assert.True(t, ok)
// ConcurrencyReplace with a running run.
run1.Status.AppState.State = v1alpha1.RunningState
c.crdClient.SparkoperatorV1alpha1().SparkApplications(run1.Namespace).Update(run1)
ok, _ = c.shouldStartNextRun(app)
assert.True(t, ok)
// The previous running run should have been deleted.
existing, _ := c.crdClient.SparkoperatorV1alpha1().SparkApplications(run1.Namespace).Get(run1.Name,
metav1.GetOptions{})
assert.Nil(t, existing)
}
func TestStartNextRun(t *testing.T) {
app := &v1alpha1.ScheduledSparkApplication{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "test-app",
},
Spec: v1alpha1.ScheduledSparkApplicationSpec{
Schedule: "@every 1m",
},
}
c, _, _ := newFakeController(app)
schedule, _ := cron.ParseStandard(app.Spec.Schedule)
status := app.Status.DeepCopy()
err := c.startNextRun(app, status, schedule)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, 1, len(status.PastRunNames))
assert.True(t, status.NextRun.After(status.LastRun.Time))
// Check the first run.
firstRunName := status.PastRunNames[0]
run, _ := c.crdClient.SparkoperatorV1alpha1().SparkApplications(app.Namespace).Get(firstRunName, metav1.GetOptions{})
assert.NotNil(t, run)
err = c.startNextRun(app, status, schedule)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, 1, len(status.PastRunNames))
assert.True(t, status.NextRun.After(status.LastRun.Time))
// Check the second run.
secondRunName := status.PastRunNames[0]
run, _ = c.crdClient.SparkoperatorV1alpha1().SparkApplications(app.Namespace).Get(secondRunName, metav1.GetOptions{})
assert.NotNil(t, run)
// The second run should have a different name.
assert.NotEqual(t, secondRunName, firstRunName)
// The first run should have been deleted.
run, _ = c.crdClient.SparkoperatorV1alpha1().SparkApplications(app.Namespace).Get(firstRunName, metav1.GetOptions{})
assert.Nil(t, run)
}
func newFakeController(
apps ...*v1alpha1.ScheduledSparkApplication) (*Controller, cache.SharedIndexInformer, *clock.FakeClock) {
crdClient := crdclientfake.NewSimpleClientset()
kubeClient := kubeclientfake.NewSimpleClientset()
apiExtensionsClient := apiextensionsfake.NewSimpleClientset()
informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 1*time.Second)
clk := clock.NewFakeClock(time.Now())
controller := NewController(crdClient, kubeClient, apiExtensionsClient, informerFactory, clk)
informer := informerFactory.Sparkoperator().V1alpha1().ScheduledSparkApplications().Informer()
for _, app := range apps {
crdClient.SparkoperatorV1alpha1().ScheduledSparkApplications(app.Namespace).Create(app)
informer.GetIndexer().Add(app)
}
return controller, informer, clk
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication
import (
"fmt"
@ -42,7 +42,6 @@ import (
crdscheme "k8s.io/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme"
crdinformers "k8s.io/spark-on-k8s-operator/pkg/client/informers/externalversions"
crdlisters "k8s.io/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1alpha1"
"k8s.io/spark-on-k8s-operator/pkg/crd"
"k8s.io/spark-on-k8s-operator/pkg/util"
)
@ -58,8 +57,8 @@ var (
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
)
// SparkApplicationController manages instances of SparkApplication.
type SparkApplicationController struct {
// Controller manages instances of SparkApplication.
type Controller struct {
crdClient crdclientset.Interface
kubeClient clientset.Interface
extensionsClient apiextensionsclient.Interface
@ -73,13 +72,13 @@ type SparkApplicationController struct {
podStateReportingChan <-chan interface{}
}
// New creates a new SparkApplicationController.
func New(
// NewController creates a new Controller.
func NewController(
crdClient crdclientset.Interface,
kubeClient clientset.Interface,
extensionsClient apiextensionsclient.Interface,
informerFactory crdinformers.SharedInformerFactory,
submissionRunnerWorkers int) *SparkApplicationController {
submissionRunnerWorkers int) *Controller {
crdscheme.AddToScheme(scheme.Scheme)
eventBroadcaster := record.NewBroadcaster()
@ -99,7 +98,7 @@ func newSparkApplicationController(
extensionsClient apiextensionsclient.Interface,
informerFactory crdinformers.SharedInformerFactory,
eventRecorder record.EventRecorder,
submissionRunnerWorkers int) *SparkApplicationController {
submissionRunnerWorkers int) *Controller {
queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(),
"spark-application-controller")
@ -109,7 +108,7 @@ func newSparkApplicationController(
runner := newSparkSubmitRunner(submissionRunnerWorkers, appStateReportingChan)
sparkPodMonitor := newSparkPodMonitor(kubeClient, podStateReportingChan)
controller := &SparkApplicationController{
controller := &Controller{
crdClient: crdClient,
kubeClient: kubeClient,
extensionsClient: extensionsClient,
@ -133,17 +132,11 @@ func newSparkApplicationController(
return controller
}
// Start starts the SparkApplicationController by registering a watcher for SparkApplication objects.
func (s *SparkApplicationController) Start(workers int, stopCh <-chan struct{}) error {
// Start starts the Controller by registering a watcher for SparkApplication objects.
func (c *Controller) Start(workers int, stopCh <-chan struct{}) error {
glog.Info("Starting the SparkApplication controller")
glog.Infof("Creating CustomResourceDefinition %s", crd.FullName)
err := crd.CreateCRD(s.extensionsClient)
if err != nil {
return fmt.Errorf("failed to create CustomResourceDefinition %s: %v", crd.FullName, err)
}
if !cache.WaitForCacheSync(stopCh, s.cacheSynced) {
if !cache.WaitForCacheSync(stopCh, c.cacheSynced) {
return fmt.Errorf("timed out waiting for cache to sync")
}
@ -151,40 +144,40 @@ func (s *SparkApplicationController) Start(workers int, stopCh <-chan struct{})
for i := 0; i < workers; i++ {
// runWorker will loop until "something bad" happens. wait.Until will then
// restart the worker after one second.
go wait.Until(s.runWorker, time.Second, stopCh)
go wait.Until(c.runWorker, time.Second, stopCh)
}
go s.runner.run(stopCh)
go s.sparkPodMonitor.run(stopCh)
go c.runner.run(stopCh)
go c.sparkPodMonitor.run(stopCh)
go s.processAppStateUpdates()
go s.processPodStateUpdates()
go c.processAppStateUpdates()
go c.processPodStateUpdates()
return nil
}
// Stop stops the controller.
func (s *SparkApplicationController) Stop() {
func (c *Controller) Stop() {
glog.Info("Stopping the SparkApplication controller")
s.queue.ShutDown()
c.queue.ShutDown()
}
// Callback function called when a new SparkApplication object gets created.
func (s *SparkApplicationController) onAdd(obj interface{}) {
func (c *Controller) onAdd(obj interface{}) {
app := obj.(*v1alpha1.SparkApplication)
if shouldSubmit(app) {
glog.Infof("SparkApplication %s was added, enqueueing it for submission", app.Name)
s.enqueue(app)
s.recorder.Eventf(
glog.Infof("SparkApplication %c was added, enqueueing it for submission", app.Name)
c.enqueue(app)
c.recorder.Eventf(
app,
apiv1.EventTypeNormal,
"SparkApplicationAdded",
"SparkApplication %s was added, enqueued it for submission",
"SparkApplication %c was added, enqueued it for submission",
app.Name)
}
}
func (s *SparkApplicationController) onUpdate(oldObj, newObj interface{}) {
func (c *Controller) onUpdate(oldObj, newObj interface{}) {
oldApp := oldObj.(*v1alpha1.SparkApplication)
newApp := newObj.(*v1alpha1.SparkApplication)
@ -194,13 +187,13 @@ func (s *SparkApplicationController) onUpdate(oldObj, newObj interface{}) {
if oldApp.Status.DriverInfo.PodName != "" {
// Clear the application ID if the driver pod of the old application is to be deleted. This is important as
// otherwise deleting the driver pod of the old application if it's still running will result in a driver state
// otherwise deleting the driver pod of the old application if it's still running will result in a driver state
// update by the sparkPodMonitor with the driver pod phase set to PodFailed. This may lead to a restart of the
// application if it's subject to a restart because of the failure state update of the driver pod. Clearing the
// application if it's subject to a restart because of the failure state update of the driver pod. Clearing the
// application ID causes the state update regarding the old driver pod to be ignored in
// processSingleDriverStateUpdate because of mismatched application IDs and as a consequence no restart to be
// triggered. This prevents the application from being submitted twice: once from the restart and once from below.
s.updateSparkApplicationStatusWithRetries(newApp, func(status *v1alpha1.SparkApplicationStatus) {
c.updateSparkApplicationStatusWithRetries(newApp, func(status *v1alpha1.SparkApplicationStatus) {
status.AppID = ""
})
}
@ -208,23 +201,23 @@ func (s *SparkApplicationController) onUpdate(oldObj, newObj interface{}) {
// Delete the driver pod and UI service of the old application. Note that deleting the driver pod kills the
// application if it is still running. Skip submitting the new application if cleanup for the old application
// failed to avoid potentially running both the old and new applications at the same time.
if err := s.deleteDriverAndUIService(oldApp, true); err != nil {
if err := c.deleteDriverAndUIService(oldApp, true); err != nil {
glog.Error(err)
return
}
glog.Infof("SparkApplication %s was updated, enqueueing it for submission", newApp.Name)
s.enqueue(newApp)
s.recorder.Eventf(
glog.Infof("SparkApplication %c was updated, enqueueing it for submission", newApp.Name)
c.enqueue(newApp)
c.recorder.Eventf(
newApp,
apiv1.EventTypeNormal,
"SparkApplicationUpdated",
"SparkApplication %s was updated, enqueued it for submission",
"SparkApplication %c was updated, enqueued it for submission",
newApp.Name)
}
func (s *SparkApplicationController) onDelete(obj interface{}) {
s.dequeue(obj)
func (c *Controller) onDelete(obj interface{}) {
c.dequeue(obj)
var app *v1alpha1.SparkApplication
switch obj.(type) {
@ -236,34 +229,34 @@ func (s *SparkApplicationController) onDelete(obj interface{}) {
}
if app != nil {
s.recorder.Eventf(
c.recorder.Eventf(
app,
apiv1.EventTypeNormal,
"SparkApplicationDeleted",
"SparkApplication %s was deleted",
"SparkApplication %c was deleted",
app.Name)
}
}
// runWorker runs a single controller worker.
func (s *SparkApplicationController) runWorker() {
func (c *Controller) runWorker() {
defer utilruntime.HandleCrash()
for s.processNextItem() {
for c.processNextItem() {
}
}
func (s *SparkApplicationController) processNextItem() bool {
key, quit := s.queue.Get()
func (c *Controller) processNextItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer s.queue.Done(key)
defer c.queue.Done(key)
err := s.syncSparkApplication(key.(string))
err := c.syncSparkApplication(key.(string))
if err == nil {
// Successfully processed the key or the key was not found, so tell the queue to stop tracking
// history for this key. This will reset things like failure counts for per-item rate limiting.
s.queue.Forget(key)
c.queue.Forget(key)
return true
}
@ -274,29 +267,29 @@ func (s *SparkApplicationController) processNextItem() bool {
// to avoid hot-looping on particular items (they're probably still not going to work right away)
// and protects the overall controller (if everything is broken, this controller needs to
// calm down or it can starve other useful work).
s.queue.AddRateLimited(key)
c.queue.AddRateLimited(key)
return true
}
func (s *SparkApplicationController) syncSparkApplication(key string) error {
func (c *Controller) syncSparkApplication(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return fmt.Errorf("failed to get the namespace and name from key %s: %v", key, err)
return fmt.Errorf("failed to get the namespace and name from key %c: %v", key, err)
}
app, err := s.getSparkApplication(namespace, name)
app, err := c.getSparkApplication(namespace, name)
if err != nil {
return err
}
err = s.createSubmission(app)
err = c.createSubmission(app)
if err != nil {
s.recorder.Eventf(
c.recorder.Eventf(
app,
apiv1.EventTypeWarning,
"SparkApplicationSubmissionCreationFailed",
"failed to create a submission for SparkApplication %s: %s",
"failed to create a submission for SparkApplication %c: %c",
app.Name,
err.Error())
return err
@ -306,22 +299,22 @@ func (s *SparkApplicationController) syncSparkApplication(key string) error {
}
// createSubmission creates a new submission for the given SparkApplication and sends it to the submission runner.
func (s *SparkApplicationController) createSubmission(app *v1alpha1.SparkApplication) error {
func (c *Controller) createSubmission(app *v1alpha1.SparkApplication) error {
appStatus := v1alpha1.SparkApplicationStatus{
AppID: buildAppID(app),
AppState: v1alpha1.ApplicationState{
State: v1alpha1.NewState,
},
}
name, port, err := createSparkUIService(app, appStatus.AppID, s.kubeClient)
name, port, err := createSparkUIService(app, appStatus.AppID, c.kubeClient)
if err != nil {
glog.Errorf("failed to create a UI service for SparkApplication %s: %v", app.Name, err)
glog.Errorf("failed to create a UI service for SparkApplication %c: %v", app.Name, err)
} else {
appStatus.DriverInfo.WebUIServiceName = name
appStatus.DriverInfo.WebUIPort = port
}
updatedApp := s.updateSparkApplicationStatusWithRetries(app, func(status *v1alpha1.SparkApplicationStatus) {
updatedApp := c.updateSparkApplicationStatusWithRetries(app, func(status *v1alpha1.SparkApplicationStatus) {
*status = v1alpha1.SparkApplicationStatus{}
appStatus.DeepCopyInto(status)
})
@ -332,42 +325,41 @@ func (s *SparkApplicationController) createSubmission(app *v1alpha1.SparkApplica
submissionCmdArgs, err := buildSubmissionCommandArgs(updatedApp)
if err != nil {
return fmt.Errorf(
"failed to build the submission command for SparkApplication %s: %v",
"failed to build the submission command for SparkApplication %c: %v",
updatedApp.Name,
err)
}
s.runner.submit(newSubmission(submissionCmdArgs, updatedApp))
c.runner.submit(newSubmission(submissionCmdArgs, updatedApp))
return nil
}
func (s *SparkApplicationController) processPodStateUpdates() {
for update := range s.podStateReportingChan {
func (c *Controller) processPodStateUpdates() {
for update := range c.podStateReportingChan {
switch update.(type) {
case *driverStateUpdate:
updatedApp := s.processSingleDriverStateUpdate(update.(*driverStateUpdate))
updatedApp := c.processSingleDriverStateUpdate(update.(*driverStateUpdate))
if updatedApp != nil && shouldRestart(updatedApp) {
s.handleRestart(updatedApp)
c.handleRestart(updatedApp)
}
continue
case *executorStateUpdate:
s.processSingleExecutorStateUpdate(update.(*executorStateUpdate))
c.processSingleExecutorStateUpdate(update.(*executorStateUpdate))
}
}
}
func (s *SparkApplicationController) processSingleDriverStateUpdate(
update *driverStateUpdate) *v1alpha1.SparkApplication {
func (c *Controller) processSingleDriverStateUpdate(update *driverStateUpdate) *v1alpha1.SparkApplication {
glog.V(2).Infof(
"Received driver state update for SparkApplication %s in namespace %s with phase %s",
"Received driver state update for SparkApplication %c in namespace %c with phase %c",
update.appName, update.appNamespace, update.podPhase)
app, err := s.getSparkApplication(update.appNamespace, update.appName)
app, err := c.getSparkApplication(update.appNamespace, update.appName)
if err != nil {
// Update may be the result of pod deletion due to deletion of the owning SparkApplication object.
// Ignore the error if the owning SparkApplication object does not exist.
if !errors.IsNotFound(err) {
glog.Errorf("failed to get SparkApplication %s in namespace %s from the store: %v", update.appName,
glog.Errorf("failed to get SparkApplication %c in namespace %c from the store: %v", update.appName,
update.appNamespace, err)
}
return nil
@ -381,26 +373,26 @@ func (s *SparkApplicationController) processSingleDriverStateUpdate(
return nil
}
s.recordDriverEvent(app, update.podPhase, update.podName)
c.recordDriverEvent(app, update.podPhase, update.podName)
// The application state is solely based on the driver pod phase once the application is successfully
// submitted and the driver pod is created.
appState := driverPodPhaseToApplicationState(update.podPhase)
if isAppTerminated(appState) {
s.recorder.Eventf(
c.recorder.Eventf(
app,
apiv1.EventTypeNormal,
"SparkApplicationTerminated",
"SparkApplication %s terminated with state: %v",
"SparkApplication %c terminated with state: %v",
update.appName,
appState)
}
return s.updateSparkApplicationStatusWithRetries(app, func(status *v1alpha1.SparkApplicationStatus) {
return c.updateSparkApplicationStatusWithRetries(app, func(status *v1alpha1.SparkApplicationStatus) {
status.DriverInfo.PodName = update.podName
if update.nodeName != "" {
if nodeIP := s.getNodeExternalIP(update.nodeName); nodeIP != "" {
status.DriverInfo.WebUIAddress = fmt.Sprintf("%s:%d", nodeIP,
if nodeIP := c.getNodeExternalIP(update.nodeName); nodeIP != "" {
status.DriverInfo.WebUIAddress = fmt.Sprintf("%c:%d", nodeIP,
status.DriverInfo.WebUIPort)
}
}
@ -412,53 +404,53 @@ func (s *SparkApplicationController) processSingleDriverStateUpdate(
})
}
func (s *SparkApplicationController) processAppStateUpdates() {
for update := range s.appStateReportingChan {
s.processSingleAppStateUpdate(update)
func (c *Controller) processAppStateUpdates() {
for update := range c.appStateReportingChan {
c.processSingleAppStateUpdate(update)
}
}
func (s *SparkApplicationController) processSingleAppStateUpdate(update *appStateUpdate) *v1alpha1.SparkApplication {
app, err := s.getSparkApplication(update.namespace, update.name)
func (c *Controller) processSingleAppStateUpdate(update *appStateUpdate) *v1alpha1.SparkApplication {
app, err := c.getSparkApplication(update.namespace, update.name)
if err != nil {
glog.Errorf("failed to get SparkApplication %s in namespace %s from the store: %v", update.name,
glog.Errorf("failed to get SparkApplication %c in namespace %c from the store: %v", update.name,
update.namespace, err)
return nil
}
submissionRetries := app.Status.SubmissionRetries
if update.state == v1alpha1.FailedSubmissionState {
s.recorder.Eventf(
c.recorder.Eventf(
app,
apiv1.EventTypeWarning,
"SparkApplicationSubmissionFailed",
"SparkApplication %s failed submission: %s",
"SparkApplication %c failed submission: %c",
update.name,
update.errorMessage)
if shouldRetrySubmission(app) {
glog.Infof("Retrying submission of SparkApplication %s", update.name)
glog.Infof("Retrying submission of SparkApplication %c", update.name)
submissionRetries++
if app.Spec.SubmissionRetryInterval != nil {
interval := time.Duration(*app.Spec.SubmissionRetryInterval) * time.Second
s.enqueueAfter(app, time.Duration(submissionRetries)*interval)
c.enqueueAfter(app, time.Duration(submissionRetries)*interval)
} else {
s.enqueue(app)
c.enqueue(app)
}
s.recorder.Eventf(
c.recorder.Eventf(
app,
apiv1.EventTypeNormal,
"SparkApplicationSubmissionRetry",
"SparkApplication %s is scheduled for a submission retry",
"SparkApplication %c is scheduled for a submission retry",
update.name)
} else {
glog.Infof("Not retrying submission of SparkApplication %s", update.name)
glog.Infof("Not retrying submission of SparkApplication %c", update.name)
}
}
return s.updateSparkApplicationStatusWithRetries(app, func(status *v1alpha1.SparkApplicationStatus) {
return c.updateSparkApplicationStatusWithRetries(app, func(status *v1alpha1.SparkApplicationStatus) {
status.AppState.State = update.state
status.AppState.ErrorMessage = update.errorMessage
status.SubmissionRetries = submissionRetries
@ -468,18 +460,17 @@ func (s *SparkApplicationController) processSingleAppStateUpdate(update *appStat
})
}
func (s *SparkApplicationController) processSingleExecutorStateUpdate(
update *executorStateUpdate) *v1alpha1.SparkApplication {
func (c *Controller) processSingleExecutorStateUpdate(update *executorStateUpdate) *v1alpha1.SparkApplication {
glog.V(2).Infof(
"Received state update of executor %s for SparkApplication %s in namespace %s with state %s",
"Received state update of executor %c for SparkApplication %c in namespace %c with state %c",
update.executorID, update.appName, update.appNamespace, update.state)
app, err := s.getSparkApplication(update.appNamespace, update.appName)
app, err := c.getSparkApplication(update.appNamespace, update.appName)
if err != nil {
// Update may be the result of pod deletion due to deletion of the owning SparkApplication object.
// Ignore the error if the owning SparkApplication object does not exist.
if !errors.IsNotFound(err) {
glog.Errorf("failed to get SparkApplication %s in namespace %s from the store: %v", update.appName,
glog.Errorf("failed to get SparkApplication %c in namespace %c from the store: %v", update.appName,
update.appNamespace, err)
}
return nil
@ -493,9 +484,9 @@ func (s *SparkApplicationController) processSingleExecutorStateUpdate(
return nil
}
s.recordExecutorEvent(app, update.state, update.podName)
c.recordExecutorEvent(app, update.state, update.podName)
return s.updateSparkApplicationStatusWithRetries(app, func(status *v1alpha1.SparkApplicationStatus) {
return c.updateSparkApplicationStatusWithRetries(app, func(status *v1alpha1.SparkApplicationStatus) {
if status.ExecutorState == nil {
status.ExecutorState = make(map[string]v1alpha1.ExecutorState)
}
@ -507,14 +498,14 @@ func (s *SparkApplicationController) processSingleExecutorStateUpdate(
})
}
func (s *SparkApplicationController) updateSparkApplicationStatusWithRetries(
func (c *Controller) updateSparkApplicationStatusWithRetries(
original *v1alpha1.SparkApplication,
updateFunc func(*v1alpha1.SparkApplicationStatus)) *v1alpha1.SparkApplication {
toUpdate := original.DeepCopy()
var lastUpdateErr error
for i := 0; i < maximumUpdateRetries; i++ {
updated, err := s.tryUpdateStatus(original, toUpdate, updateFunc)
updated, err := c.tryUpdateStatus(original, toUpdate, updateFunc)
if err == nil {
return updated
}
@ -523,22 +514,22 @@ func (s *SparkApplicationController) updateSparkApplicationStatusWithRetries(
// Failed update to the API server.
// Get the latest version from the API server first and re-apply the update.
name := toUpdate.Name
toUpdate, err = s.crdClient.SparkoperatorV1alpha1().SparkApplications(toUpdate.Namespace).Get(name,
toUpdate, err = c.crdClient.SparkoperatorV1alpha1().SparkApplications(toUpdate.Namespace).Get(name,
metav1.GetOptions{})
if err != nil {
glog.Errorf("failed to get SparkApplication %s: %v", name, err)
glog.Errorf("failed to get SparkApplication %c: %v", name, err)
return nil
}
}
if lastUpdateErr != nil {
glog.Errorf("failed to update SparkApplication %s: %v", toUpdate.Name, lastUpdateErr)
glog.Errorf("failed to update SparkApplication %c: %v", toUpdate.Name, lastUpdateErr)
}
return nil
}
func (s *SparkApplicationController) tryUpdateStatus(
func (c *Controller) tryUpdateStatus(
original *v1alpha1.SparkApplication,
toUpdate *v1alpha1.SparkApplication,
updateFunc func(*v1alpha1.SparkApplicationStatus)) (*v1alpha1.SparkApplication, error) {
@ -547,49 +538,48 @@ func (s *SparkApplicationController) tryUpdateStatus(
return nil, nil
}
return s.crdClient.SparkoperatorV1alpha1().SparkApplications(toUpdate.Namespace).Update(toUpdate)
return c.crdClient.SparkoperatorV1alpha1().SparkApplications(toUpdate.Namespace).Update(toUpdate)
}
func (s *SparkApplicationController) getSparkApplication(namespace string, name string) (
*v1alpha1.SparkApplication, error) {
return s.lister.SparkApplications(namespace).Get(name)
func (c *Controller) getSparkApplication(namespace string, name string) (*v1alpha1.SparkApplication, error) {
return c.lister.SparkApplications(namespace).Get(name)
}
func (s *SparkApplicationController) enqueue(obj interface{}) {
func (c *Controller) enqueue(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
glog.Errorf("failed to get key for %v: %v", obj, err)
return
}
s.queue.AddRateLimited(key)
c.queue.AddRateLimited(key)
}
func (s *SparkApplicationController) enqueueAfter(obj interface{}, after time.Duration) {
func (c *Controller) enqueueAfter(obj interface{}, after time.Duration) {
key, err := keyFunc(obj)
if err != nil {
glog.Errorf("failed to get key for %v: %v", obj, err)
return
}
s.queue.AddAfter(key, after)
c.queue.AddAfter(key, after)
}
func (s *SparkApplicationController) dequeue(obj interface{}) {
func (c *Controller) dequeue(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
glog.Errorf("failed to get key for %v: %v", obj, err)
return
}
s.queue.Forget(key)
s.queue.Done(key)
c.queue.Forget(key)
c.queue.Done(key)
}
func (s *SparkApplicationController) getNodeExternalIP(nodeName string) string {
node, err := s.kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
func (c *Controller) getNodeExternalIP(nodeName string) string {
node, err := c.kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
glog.Errorf("failed to get node %s", nodeName)
glog.Errorf("failed to get node %c", nodeName)
return ""
}
@ -603,14 +593,14 @@ func (s *SparkApplicationController) getNodeExternalIP(nodeName string) string {
// handleRestart handles application restart if the application has terminated and is subject to restart according to
// the restart policy.
func (s *SparkApplicationController) handleRestart(app *v1alpha1.SparkApplication) {
glog.Infof("SparkApplication %s failed or terminated, restarting it with RestartPolicy %s",
func (c *Controller) handleRestart(app *v1alpha1.SparkApplication) {
glog.Infof("SparkApplication %c failed or terminated, restarting it with RestartPolicy %c",
app.Name, app.Spec.RestartPolicy)
s.recorder.Eventf(
c.recorder.Eventf(
app,
apiv1.EventTypeNormal,
"SparkApplicationRestart",
"SparkApplication %s is subject to restart",
"SparkApplication %c is subject to restart",
app.Name)
// Delete the old driver pod and UI service if necessary. Note that in case an error occurred here, we simply
@ -618,38 +608,36 @@ func (s *SparkApplicationController) handleRestart(app *v1alpha1.SparkApplicatio
// terminated, so failure to clean up the old driver pod and UI service is not a blocker. Also note that
// deleting an already terminated driver pod won't trigger a driver state update by the sparkPodMonitor, so it
// won't cause repetitive restart handling.
if err := s.deleteDriverAndUIService(app, false); err != nil {
if err := c.deleteDriverAndUIService(app, false); err != nil {
glog.Error(err)
}
// Enqueue the object for re-submission.
s.enqueue(app)
c.enqueue(app)
}
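handleRestart is only useful behind a gate that consults the restart policy; that gate is not part of this hunk. A plausible sketch, reusing the "Never"/"OnFailure"/"Always" enum from the CRD validation below — the v1alpha1 constant and state names here are assumptions:
func shouldRestart(app *v1alpha1.SparkApplication) bool {
	// Hypothetical restart gate; the constant names are assumptions based on
	// the restartPolicy enum in the CRD validation added by this commit.
	switch app.Spec.RestartPolicy {
	case v1alpha1.Always:
		return true
	case v1alpha1.OnFailure:
		// Restart only applications that terminated in a failed state.
		return app.Status.AppState.State == v1alpha1.FailedState
	default: // v1alpha1.Never
		return false
	}
}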
func (s *SparkApplicationController) deleteDriverAndUIService(
app *v1alpha1.SparkApplication,
waitForDriverDeletion bool) error {
func (c *Controller) deleteDriverAndUIService(app *v1alpha1.SparkApplication, waitForDriverDeletion bool) error {
var zero int64
if app.Status.DriverInfo.PodName != "" {
err := s.kubeClient.CoreV1().Pods(app.Namespace).Delete(app.Status.DriverInfo.PodName,
err := c.kubeClient.CoreV1().Pods(app.Namespace).Delete(app.Status.DriverInfo.PodName,
&metav1.DeleteOptions{GracePeriodSeconds: &zero})
if err != nil {
return fmt.Errorf("failed to delete old driver pod %s of SparkApplication %s: %v",
return fmt.Errorf("failed to delete old driver pod %c of SparkApplication %c: %v",
app.Status.DriverInfo.PodName, app.Name, err)
}
}
if app.Status.DriverInfo.WebUIServiceName != "" {
err := s.kubeClient.CoreV1().Services(app.Namespace).Delete(app.Status.DriverInfo.WebUIServiceName,
err := c.kubeClient.CoreV1().Services(app.Namespace).Delete(app.Status.DriverInfo.WebUIServiceName,
&metav1.DeleteOptions{GracePeriodSeconds: &zero})
if err != nil {
return fmt.Errorf("failed to delete old web UI service %s of SparkApplication %s: %v",
return fmt.Errorf("failed to delete old web UI service %c of SparkApplication %c: %v",
app.Status.DriverInfo.WebUIServiceName, app.Name, err)
}
}
if waitForDriverDeletion {
wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) {
_, err := s.kubeClient.CoreV1().Pods(app.Namespace).Get(app.Status.DriverInfo.PodName, metav1.GetOptions{})
_, err := c.kubeClient.CoreV1().Pods(app.Namespace).Get(app.Status.DriverInfo.PodName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, nil
@ -663,21 +651,21 @@ func (s *SparkApplicationController) deleteDriverAndUIService(
return nil
}
func (s *SparkApplicationController) recordDriverEvent(
func (c *Controller) recordDriverEvent(
app *v1alpha1.SparkApplication, phase apiv1.PodPhase, name string) {
if phase == apiv1.PodSucceeded {
s.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkDriverCompleted", "Driver %s completed", name)
c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkDriverCompleted", "Driver %s completed", name)
} else if phase == apiv1.PodFailed {
s.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkDriverFailed", "Driver %s failed", name)
c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkDriverFailed", "Driver %s failed", name)
}
}
func (s *SparkApplicationController) recordExecutorEvent(
func (c *Controller) recordExecutorEvent(
app *v1alpha1.SparkApplication, state v1alpha1.ExecutorState, name string) {
if state == v1alpha1.ExecutorCompletedState {
s.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorCompleted", "Executor %s completed", name)
c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorCompleted", "Executor %s completed", name)
} else if state == v1alpha1.ExecutorFailedState {
s.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkExecutorFailed", "Executor %s failed", name)
c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkExecutorFailed", "Executor %s failed", name)
}
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication
import (
"os"
@ -37,7 +37,7 @@ import (
crdinformers "k8s.io/spark-on-k8s-operator/pkg/client/informers/externalversions"
)
func newFakeController(apps ...*v1alpha1.SparkApplication) (*SparkApplicationController, *record.FakeRecorder) {
func newFakeController(apps ...*v1alpha1.SparkApplication) (*Controller, *record.FakeRecorder) {
crdclientfake.AddToScheme(scheme.Scheme)
crdClient := crdclientfake.NewSimpleClientset()
kubeClient := kubeclientfake.NewSimpleClientset()

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication
import (
"fmt"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication
import (
"testing"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication
import (
"fmt"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication
import (
"reflect"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication
import (
"fmt"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication
import (
"os"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication
import (
"testing"

View File

@ -14,4 +14,4 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package sparkapplication

View File

@ -17,4 +17,5 @@ limitations under the License.
package crd
// Package crd contains the definition of and code to work with the CustomResourceDefinition (CRD)
// for SparkApplication and a client to perform CRUD operations on instances of SparkApplication.
// for SparkApplication and ScheduledSparkApplication and a client to perform CRUD operations on
// instances of SparkApplication and ScheduledSparkApplication.

View File

@ -0,0 +1,220 @@
/*
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduledsparkapplication
import (
"fmt"
"reflect"
"time"
"github.com/golang/glog"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io"
"k8s.io/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1"
)
// CRD metadata.
const (
Plural = "scheduledsparkapplications"
Singular = "scheduledsparkapplication"
ShortName = "scheduledsparkapp"
Group = sparkoperator.GroupName
Version = "v1alpha1"
FullName = Plural + "." + Group
)
// CreateCRD creates a Kubernetes CustomResourceDefinition (CRD) for ScheduledSparkApplication.
// An error is returned if it fails to create the CustomResourceDefinition before it times out.
func CreateCRD(clientset apiextensionsclient.Interface) error {
// Define the CustomResourceDefinition to create.
sparkAppCrd := &apiextensionsv1beta1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: FullName,
},
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
Group: Group,
Version: Version,
Scope: apiextensionsv1beta1.NamespaceScoped,
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: Plural,
Singular: Singular,
ShortNames: []string{ShortName},
Kind: reflect.TypeOf(v1alpha1.ScheduledSparkApplication{}).Name(),
},
Validation: getCustomResourceValidation(),
},
}
_, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(sparkAppCrd)
if err != nil {
if apierrors.IsAlreadyExists(err) {
glog.Warningf("CustomResourceDefinition %s already exists", FullName)
return nil
}
return err
}
// Wait for the CustomResourceDefinition to become registered.
err = waitForCRDEstablishment(clientset)
// Try deleting the CustomResourceDefinition if it fails to be registered on time.
if err != nil {
deleteErr := deleteCRD(clientset)
if deleteErr != nil {
return errors.NewAggregate([]error{err, deleteErr})
}
return err
}
return nil
}
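For context, CreateCRD would typically be invoked once at operator startup. A hedged wiring sketch — BuildConfigFromFlags and NewForConfig are the standard client-go and apiextensions constructors, while this package's import path is an assumption:
package example

import (
	"github.com/golang/glog"
	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/spark-on-k8s-operator/pkg/crd/scheduledsparkapplication"
)

// registerCRD builds an apiextensions clientset from a kubeconfig path and
// registers the ScheduledSparkApplication CRD; the import path above is an
// assumed location for this package.
func registerCRD(kubeConfig string) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeConfig)
	if err != nil {
		glog.Fatal(err)
	}
	clientset, err := apiextensionsclient.NewForConfig(config)
	if err != nil {
		glog.Fatal(err)
	}
	if err = scheduledsparkapplication.CreateCRD(clientset); err != nil {
		glog.Fatalf("failed to create the CustomResourceDefinition %s: %v",
			scheduledsparkapplication.FullName, err)
	}
}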
func deleteCRD(clientset apiextensionsclient.Interface) error {
var zero int64
err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(FullName,
&metav1.DeleteOptions{GracePeriodSeconds: &zero})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}
func getCRD(clientset apiextensionsclient.Interface) (*apiextensionsv1beta1.CustomResourceDefinition, error) {
return clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(FullName, metav1.GetOptions{})
}
// waitForCRDEstablishment waits for the CRD to be registered and established until it times out.
func waitForCRDEstablishment(clientset apiextensionsclient.Interface) error {
return wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) {
sparkAppCrd, err := getCRD(clientset)
// Guard against dereferencing a nil CRD object if the Get call failed; a non-nil error aborts the poll.
if err != nil {
return false, err
}
for _, cond := range sparkAppCrd.Status.Conditions {
switch cond.Type {
case apiextensionsv1beta1.Established:
if cond.Status == apiextensionsv1beta1.ConditionTrue {
return true, nil
}
case apiextensionsv1beta1.NamesAccepted:
if cond.Status == apiextensionsv1beta1.ConditionFalse {
fmt.Printf("Name conflict: %v\n", cond.Reason)
}
}
}
return false, nil
})
}
func getCustomResourceValidation() *apiextensionsv1beta1.CustomResourceValidation {
return &apiextensionsv1beta1.CustomResourceValidation{
OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{
Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
"spec": {
Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
"schedule": {
Type: "string",
},
"concurrencyPolicy": {
Enum: []apiextensionsv1beta1.JSON{
{Raw: []byte(`"Allow"`)},
{Raw: []byte(`"Forbid"`)},
{Raw: []byte(`"Replace"`)},
},
},
"template": {
Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
"type": {
Enum: []apiextensionsv1beta1.JSON{
{Raw: []byte(`"Java"`)},
{Raw: []byte(`"Scala"`)},
{Raw: []byte(`"Python"`)},
{Raw: []byte(`"R"`)},
},
},
"mode": {
Enum: []apiextensionsv1beta1.JSON{
{Raw: []byte(`"cluster"`)},
{Raw: []byte(`"client"`)},
},
},
"driver": {
Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
"cores": {
Type: "number",
Minimum: float64Ptr(0),
ExclusiveMinimum: true,
},
"podName": {
Pattern: "[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*",
},
},
},
"executor": {
Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
"cores": {
Type: "integer",
Minimum: float64Ptr(1),
},
"instances": {
Type: "integer",
Minimum: float64Ptr(1),
},
},
},
"deps": {
Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
"downloadTimeout": {
Type: "integer",
Minimum: float64Ptr(1),
},
"maxSimultaneousDownloads": {
Type: "integer",
Minimum: float64Ptr(1),
},
},
},
"restartPolicy": {
Enum: []apiextensionsv1beta1.JSON{
{Raw: []byte(`"Never"`)},
{Raw: []byte(`"OnFailure"`)},
{Raw: []byte(`"Always"`)},
},
},
"maxSubmissionRetries": {
Type: "integer",
Minimum: float64Ptr(1),
},
"submissionRetryInterval": {
Type: "integer",
Minimum: float64Ptr(1),
},
},
},
},
},
},
},
}
}
func float64Ptr(f float64) *float64 {
return &f
}
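To make the validation schema concrete, here is a hypothetical minimal object it would admit; every Go field name below is an assumption inferred from the schema's JSON property names:
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1alpha1"
)

// A nightly run: a cron schedule, an allowed concurrencyPolicy value, and a
// template whose type and mode come from the enumerated sets in the schema
// above. Field names such as Schedule, ConcurrencyPolicy, and Template are
// assumptions about the v1alpha1 types.
var nightly = v1alpha1.ScheduledSparkApplication{
	ObjectMeta: metav1.ObjectMeta{Name: "spark-pi-nightly", Namespace: "default"},
	Spec: v1alpha1.ScheduledSparkApplicationSpec{
		Schedule:          "0 0 * * *",
		ConcurrencyPolicy: "Allow",
		Template: v1alpha1.SparkApplicationSpec{
			Type: "Scala",
			Mode: "cluster",
		},
	},
}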

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package crd
package sparkapplication
import (
"fmt"