Merge branch 'feature/combine_codes' of https://github.com/TommyLike/volcano into feature/combine_codes

TommyLike 2019-04-30 14:27:25 +08:00 committed by TommyLike
commit 2a0c3a84fd
99 changed files with 11550 additions and 210 deletions

2
.gitignore vendored

@@ -124,4 +124,4 @@ kubernetes.tar.gz
*.pyc
# e2e log files
/hack/*.log
*.log


@@ -14,7 +14,6 @@ jobs:
before_script:
- go get -u golang.org/x/lint/golint
script:
- make
- make verify
- stage: E2E Tests
before_script:
@@ -28,6 +27,11 @@ jobs:
- sudo curl -o /usr/local/bin/kind -L https://github.com/kubernetes-sigs/kind/releases/download/0.2.0/kind-linux-amd64
- sudo chmod +x /usr/local/bin/kind
script:
- make cli
- make docker
- make vkctl
- make images
- make e2e-test-kind
after_failure:
# Echo logs and upload
- test -f volcano-admission.log && echo "******<<admission logs>>******" && cat volcano-admission.log
- test -f volcano-controller.log && echo "******<<controller logs>>******" && cat volcano-controller.log
- test -f volcano-scheduler.log && echo "******<<scheduler logs>>******" && cat volcano-scheduler.log

92
Gopkg.lock generated

@@ -138,6 +138,22 @@
pruneopts = "UT"
revision = "787624de3eb7bd915c329cba748687a3b22666a6"
[[projects]]
digest = "1:0ade334594e69404d80d9d323445d2297ff8161637f9b2d347cc6973d2d6f05b"
name = "github.com/hashicorp/errwrap"
packages = ["."]
pruneopts = "UT"
revision = "8a6fb523712970c966eefc6b39ed2c5e74880354"
version = "v1.0.0"
[[projects]]
digest = "1:f668349b83f7d779567c880550534addeca7ebadfdcf44b0b9c39be61864b4b7"
name = "github.com/hashicorp/go-multierror"
packages = ["."]
pruneopts = "UT"
revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1"
version = "v1.0.0"
[[projects]]
digest = "1:3f90d23757c18b1e07bf11494dbe737ee2c44d881c0f41e681611abdadad62fa"
name = "github.com/hashicorp/golang-lru"
@@ -186,6 +202,52 @@
revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82"
version = "1.1.4"
[[projects]]
branch = "master"
digest = "1:373e831b25d35980629d8042cf8941f1455cee70e846e2c91064ecb074ce3826"
name = "github.com/kubernetes-sigs/kube-batch"
packages = [
"cmd/kube-batch/app",
"cmd/kube-batch/app/options",
"pkg/apis/scheduling/v1alpha1",
"pkg/apis/utils",
"pkg/client/clientset/versioned",
"pkg/client/clientset/versioned/scheme",
"pkg/client/clientset/versioned/typed/scheduling/v1alpha1",
"pkg/client/informers/externalversions",
"pkg/client/informers/externalversions/internalinterfaces",
"pkg/client/informers/externalversions/scheduling",
"pkg/client/informers/externalversions/scheduling/v1alpha1",
"pkg/client/listers/scheduling/v1alpha1",
"pkg/scheduler",
"pkg/scheduler/actions",
"pkg/scheduler/actions/allocate",
"pkg/scheduler/actions/backfill",
"pkg/scheduler/actions/enqueue",
"pkg/scheduler/actions/preempt",
"pkg/scheduler/actions/reclaim",
"pkg/scheduler/api",
"pkg/scheduler/api/helpers",
"pkg/scheduler/cache",
"pkg/scheduler/conf",
"pkg/scheduler/framework",
"pkg/scheduler/metrics",
"pkg/scheduler/plugins",
"pkg/scheduler/plugins/conformance",
"pkg/scheduler/plugins/drf",
"pkg/scheduler/plugins/gang",
"pkg/scheduler/plugins/nodeorder",
"pkg/scheduler/plugins/predicates",
"pkg/scheduler/plugins/priority",
"pkg/scheduler/plugins/proportion",
"pkg/scheduler/plugins/util",
"pkg/scheduler/util",
"pkg/version",
]
pruneopts = "UT"
revision = "2e229b9ef61ca616735b667beb77a01fac2bb5f1"
source = "https://github.com/volcano-sh/kube-batch"
[[projects]]
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
name = "github.com/matttproud/golang_protobuf_extensions"
@@ -980,20 +1042,26 @@
analyzer-version = 1
input-imports = [
"github.com/golang/glog",
"github.com/hashicorp/go-multierror",
"github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app",
"github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options",
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1",
"github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned",
"github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions",
"github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1",
"github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1",
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions",
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api",
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins",
"github.com/onsi/ginkgo",
"github.com/onsi/gomega",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promauto",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/spf13/cobra",
"github.com/spf13/pflag",
"golang.org/x/crypto/ssh",
"gopkg.in/yaml.v2",
"k8s.io/api/admission/v1beta1",
"k8s.io/api/admissionregistration/v1beta1",
"k8s.io/api/apps/v1",
"k8s.io/api/core/v1",
"k8s.io/api/policy/v1beta1",
"k8s.io/api/scheduling/v1beta1",
"k8s.io/apimachinery/pkg/api/errors",
"k8s.io/apimachinery/pkg/api/meta",
@@ -1008,6 +1076,7 @@
"k8s.io/apimachinery/pkg/util/runtime",
"k8s.io/apimachinery/pkg/util/strategicpatch",
"k8s.io/apimachinery/pkg/util/uuid",
"k8s.io/apimachinery/pkg/util/validation",
"k8s.io/apimachinery/pkg/util/validation/field",
"k8s.io/apimachinery/pkg/util/wait",
"k8s.io/apimachinery/pkg/watch",
@@ -1016,14 +1085,13 @@
"k8s.io/client-go/discovery/fake",
"k8s.io/client-go/informers",
"k8s.io/client-go/informers/core/v1",
"k8s.io/client-go/informers/policy/v1beta1",
"k8s.io/client-go/informers/scheduling/v1beta1",
"k8s.io/client-go/informers/storage/v1",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/kubernetes/scheme",
"k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1",
"k8s.io/client-go/kubernetes/typed/core/v1",
"k8s.io/client-go/listers/core/v1",
"k8s.io/client-go/listers/scheduling/v1beta1",
"k8s.io/client-go/plugin/pkg/client/auth/gcp",
"k8s.io/client-go/rest",
"k8s.io/client-go/testing",
@@ -1041,14 +1109,10 @@
"k8s.io/code-generator/cmd/lister-gen",
"k8s.io/gengo/args",
"k8s.io/gengo/examples/deepcopy-gen/generators",
"k8s.io/kubernetes/pkg/api/v1/pod",
"k8s.io/kubernetes/pkg/apis/scheduling",
"k8s.io/kubernetes/pkg/scheduler/algorithm",
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates",
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities",
"k8s.io/kubernetes/pkg/apis/core",
"k8s.io/kubernetes/pkg/apis/core/v1",
"k8s.io/kubernetes/pkg/apis/core/validation",
"k8s.io/kubernetes/pkg/scheduler/api",
"k8s.io/kubernetes/pkg/scheduler/cache",
"k8s.io/kubernetes/pkg/scheduler/volumebinder",
]
solver-name = "gps-cdcl"
solver-version = 1


@@ -36,9 +36,10 @@ required = [
branch = "master"
name = "github.com/golang/glog"
#[[constraint]]
# name = "volcano.sh/volcano"
# version = "0.4.1"
[[constraint]]
name = "github.com/kubernetes-sigs/kube-batch"
branch = "master"
source = "https://github.com/volcano-sh/kube-batch"
[[constraint]]
name = "github.com/onsi/ginkgo"
@@ -99,3 +100,8 @@ required = [
[[prune.project]]
name = "k8s.io/code-generator"
unused-packages = false
[[constraint]]
name = "github.com/hashicorp/go-multierror"
version = "1.0.0"


@@ -1,12 +1,10 @@
BIN_DIR=_output/bin
CHART_DIR=_output/chart
IMAGE_DIR=_output/image
IMAGE=volcano
REL_OSARCH="linux/amd64"
TAG=v0.4.2
VERSION?=${TAG}
RELEASE_VER?=${TAG}
IMAGE_PREFIX=kubesigs/vk
REL_OSARCH=linux/amd64
REPO_PATH=volcano.sh/volcano
IMAGE_PREFIX=volcanosh/vk
TAG=latest
GitSHA=`git rev-parse HEAD`
Date=`date "+%Y-%m-%d %H:%M:%S"`
LD_FLAGS=" \
-X '${REPO_PATH}/pkg/version.GitSHA=${GitSHA}' \
-X '${REPO_PATH}/pkg/version.Built=${Date}' \
@@ -16,8 +14,11 @@ LD_FLAGS=" \
all: kube-batch vk-controllers vk-admission vkctl
init:
mkdir -p ${BIN_DIR}
kube-batch: init
go build -ldflags ${LD_FLAGS} -o=${BIN_DIR}/kube-batch ./cmd/kube-batch
go build -ldflags ${LD_FLAGS} -o=${BIN_DIR}/kube-batch ./cmd/scheduler
vk-controllers: init
go build -ldflags ${LD_FLAGS} -o=${BIN_DIR}/vk-controllers ./cmd/controllers
@@ -28,42 +29,23 @@ vk-admission: init
vkctl: init
go build -ldflags ${LD_FLAGS} -o=${BIN_DIR}/vkctl ./cmd/cli
init:
mkdir -p ${BIN_DIR}
mkdir -p ${CHART_DIR}
mkdir -p ${IMAGE_DIR}
rel_bins:
image_bins:
go get github.com/mitchellh/gox
#Build kube-batch binary
CGO_ENABLED=0 gox -osarch=${REL_OSARCH} -ldflags ${LD_FLAGS} \
-output=${BIN_DIR}/{{.OS}}/{{.Arch}}/kube-batch ./cmd/kube-batch
#Build job controller & job admission
#TODO: Add version support in job controller and admission to make LD_FLAGS work
for name in controllers admission; do\
CGO_ENABLED=0 gox -osarch=${REL_OSARCH} -ldflags ${LD_FLAGS} -output ${BIN_DIR}/{{.OS}}/{{.Arch}}/vk-$$name ./cmd/$$name; \
CGO_ENABLED=0 gox -osarch=${REL_OSARCH} -output ${BIN_DIR}/${REL_OSARCH}/vkctl ./cmd/cli
for name in controllers scheduler admission; do\
CGO_ENABLED=0 gox -osarch=${REL_OSARCH} -output ${BIN_DIR}/${REL_OSARCH}/vk-$$name ./cmd/$$name; \
done
images: rel_bins
#Build kube-batch images
cp ${BIN_DIR}/${REL_OSARCH}/kube-batch ./deployment/images/
docker build ./deployment/images -t kubesigs/kube-batch:${RELEASE_VER}
rm -f ./deployment/images/kube-batch
#Build job controller and admission images
for name in controllers admission; do\
cp ${BIN_DIR}/${REL_OSARCH}/vk-$$name ./deployment/images/$$name/; \
docker build --no-cache -t $(IMAGE_PREFIX)-$$name:$(RELEASE_VER) ./deployment/images/$$name; \
rm deployment/images/$$name/vk-$$name; \
done
docker: images
images: image_bins
for name in controllers scheduler admission; do\
cp ${BIN_DIR}/${REL_OSARCH}/vk-$$name ./installer/dockerfile/$$name/; \
docker build --no-cache -t $(IMAGE_PREFIX)-$$name:$(TAG) ./installer/dockerfile/$$name; \
rm installer/dockerfile/$$name/vk-$$name; \
done
generate-code:
./hack/update-gencode.sh
e2e-test:
./hack/run-e2e.sh
unit-test:
go list ./... | grep -v e2e | xargs go test -v
@@ -78,14 +60,3 @@ verify: generate-code
hack/verify-gofmt.sh
hack/verify-golint.sh
hack/verify-gencode.sh
chart: init
helm package ./deployment/volcano --version=${VERSION} --destination=${CHART_DIR}
package: clean images chart vkctl
docker save kubesigs/kube-batch:${RELEASE_VER} > ${IMAGE_DIR}/kube-batch.$(RELEASE_VER).tar;
for name in controllers admission; do \
docker save $(IMAGE_PREFIX)-$$name:$(RELEASE_VER) > ${IMAGE_DIR}/$(IMAGE)-$$name.$(RELEASE_VER).tar; \
done
gzip ${IMAGE_DIR}/*.tar
tar -zcvf _output/Volcano-package-${VERSION}.tgz -C _output/ ./bin/vkctl ./chart ./image

116
README.md

@@ -1,55 +1,129 @@
# Volcano
![volcano-logo](docs/images/volcano-logo.png)
[![Build Status](https://travis-ci.com/volcano-sh/volcano.svg?token=sstuqFE81ukmNz9cEEtd&branch=master)](https://travis-ci.com/volcano-sh/volcano) [![slack](https://img.shields.io/badge/Volcano-%23SLACK-red.svg)](https://volcano-sh.slack.com/messages/CGET876H5/)
-------
Volcano is a Kubernetes-based system for high performance workloads, providing mechanisms for applications
that want to run high performance workloads on Kubernetes, e.g. Tensorflow, Spark, MPI.
[![Build Status](https://travis-ci.org/volcano-sh/volcano.svg?branch=master)](https://travis-ci.org/volcano-sh/volcano)
[![Go Report Card](https://goreportcard.com/badge/github.com/volcano-sh/volcano)](https://goreportcard.com/report/github.com/volcano-sh/volcano)
[![RepoSize](https://img.shields.io/github/repo-size/volcano-sh/volcano.svg)](https://github.com/volcano-sh/volcano)
[![Release](https://img.shields.io/github/release/volcano-sh/volcano.svg)](https://github.com/volcano-sh/volcano/releases)
[![LICENSE](https://img.shields.io/github/license/volcano-sh/volcano.svg)](https://github.com/volcano-sh/volcano/blob/master/LICENSE)
Volcano builds upon a decade and a half of experience running high performance workloads at scale
using several systems, combined with best-of-breed ideas and practices from the open source community.
Volcano is a system for running high performance workloads on
Kubernetes. It provides a suite of mechanisms currently missing from
Kubernetes that are commonly required by many classes of high
performance workload, including:
1. machine learning/deep learning,
2. bioinformatics/genomics, and
3. other "big data" applications.
These types of applications typically run on generalized domain
frameworks like Tensorflow, Spark, PyTorch, and MPI, with which Volcano integrates.
Some examples of the mechanisms and features that Volcano adds to Kubernetes are:
1. Job management extensions and improvements, e.g.:
    1. Multi-pod jobs
    2. Lifecycle management extensions, including suspend/resume and restart
    3. Improved error handling
    4. Indexed jobs
    5. Task dependencies
2. Scheduling extensions, e.g.:
    1. Co-scheduling
    2. Fair-share scheduling
    3. Queue scheduling
    4. Preemption and reclaims
    5. Reservations and backfills
    6. Topology-based scheduling
3. Runtime extensions, e.g.:
    1. Support for specialized container runtimes like Singularity, with GPU accelerator extensions and enhanced security features
4. Other:
    1. Data locality awareness and intelligent scheduling
    2. Optimizations for data throughput, round-trip latency, etc.
Volcano builds upon a decade and a half of experience running a wide
variety of high performance workloads at scale using several systems
and platforms, combined with best-of-breed ideas and practices from
the open source community.
## Overall Architecture
![volcano](docs/images/volcano-intro.png)
## Installation
## Quick Start Guide
The easiest way to use Volcano is to use the Helm chart.
The easiest way to deploy Volcano is to use the Helm chart.
### Pre-requisites
### 1. Volcano Image
Official images are not yet available on DockerHub; however, you can build them locally with the command:
```
make docker
```
**NOTE**: You need to ensure the images are correctly loaded in your Kubernetes cluster; for
example, if you are using a [kind cluster](https://github.com/kubernetes-sigs/kind),
try the command ```kind load docker-image <image-name>:<tag>``` for each of the images.
First of all, clone the repo to your local path:
### 2. Helm charts
First of all, clone the repo to your local path:
```
# mkdir -p $GOPATH/src/volcano.sh/
# cd $GOPATH/src/volcano.sh/
# git clone https://github.com/volcano-sh/volcano.git
```
Second, install the required helm plugin and generate a valid certificate. Volcano uses the helm plugin **gen-admission-secret**
to generate a certificate for the admission service to communicate with the Kubernetes API server.
### 1. Volcano Image
Official images are available on [DockerHub](https://hub.docker.com/u/volcanosh); however, you can
build them locally with the command:
```
cd $GOPATH/src/volcano.sh/volcano
make images
## Verify your images
# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
volcanosh/volcano-admission latest a83338506638 8 seconds ago 41.4MB
volcanosh/volcano-scheduler latest faa3c2a25ac3 9 seconds ago 49.6MB
volcanosh/volcano-controllers latest 7b11606ebfb8 10 seconds ago 44.2MB
```
**NOTE**: You need to ensure the images are correctly loaded in your Kubernetes cluster; for
example, if you are using a [kind cluster](https://github.com/kubernetes-sigs/kind),
try the command ```kind load docker-image <image-name>:<tag>``` for each of the images.
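Concretely, assuming a kind cluster with the default name and the `latest` images built above, loading them might look like this:
```
kind load docker-image volcanosh/volcano-controllers:latest
kind load docker-image volcanosh/volcano-scheduler:latest
kind load docker-image volcanosh/volcano-admission:latest
```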
### 2. Helm charts
Second, install the required helm plugin and generate a valid
certificate. Volcano uses the helm plugin **gen-admission-secret** to
generate a certificate for the admission service to communicate with the
Kubernetes API server.
```
#1. Install helm plugin
helm plugin install installer/chart/volcano/plugins/gen-admission-secret
#2. Generate the secret with the service name
helm gen-admission-secret --service <specified-name>-admission-service --namespace <namespace>
## For example:
kubectl create namespace volcano-trial
helm gen-admission-secret --service volcano-trial-admission-service --namespace volcano-trial
```
Finally, install the helm chart.
```
helm install installer/chart/volcano --namespace <namespace> --name <specified-name>
## For example:
helm install installer/chart/volcano --namespace volcano-trial --name volcano-trial
```
**NOTE**: The ```<specified-name>``` used in the two commands above should be identical.
To verify your installation, run the following commands:
```
#1. Verify the Running Pods
# kubectl get pods --namespace <namespace>


@@ -1,115 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
vkbatchv1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1"
vkv1 "volcano.sh/volcano/pkg/apis/batch/v1alpha1"
vkcorev1 "volcano.sh/volcano/pkg/apis/bus/v1alpha1"
)
var JobKind = vkbatchv1.SchemeGroupVersion.WithKind("Job")
var CommandKind = vkcorev1.SchemeGroupVersion.WithKind("Command")
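// GetController returns the UID of the controller (managing owner reference)
// of the given object, or an empty UID if the object has no controller.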
func GetController(obj interface{}) types.UID {
accessor, err := meta.Accessor(obj)
if err != nil {
return ""
}
controllerRef := metav1.GetControllerOf(accessor)
if controllerRef != nil {
return controllerRef.UID
}
return ""
}
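// ControlledBy reports whether the given object has a controller owner
// reference whose Kind matches the given GroupVersionKind.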
func ControlledBy(obj interface{}, gvk schema.GroupVersionKind) bool {
accessor, err := meta.Accessor(obj)
if err != nil {
return false
}
controllerRef := metav1.GetControllerOf(accessor)
if controllerRef != nil {
return controllerRef.Kind == gvk.Kind
}
return false
}
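// CreateConfigMapIfNotExist creates a ConfigMap named cmName holding the given
// data for the Job, with the Job set as its controlling owner reference.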
func CreateConfigMapIfNotExist(job *vkv1.Job, kubeClients *kubernetes.Clientset, data map[string]string, cmName string) error {
// If ConfigMap does not exist, create one for Job.
if _, err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Get(cmName, metav1.GetOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
glog.V(3).Infof("Failed to get Configmap for Job <%s/%s>: %v",
job.Namespace, job.Name, err)
return err
}
}
cm := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: job.Namespace,
Name: cmName,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(job, JobKind),
},
},
Data: data,
}
if _, err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Create(cm); err != nil {
glog.V(3).Infof("Failed to create ConfigMap for Job <%s/%s>: %v",
job.Namespace, job.Name, err)
return err
}
return nil
}
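// DeleteConfigmap deletes the Job's ConfigMap named cmName; a ConfigMap that
// no longer exists is treated as success.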
func DeleteConfigmap(job *vkv1.Job, kubeClients *kubernetes.Clientset, cmName string) error {
if _, err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Get(cmName, metav1.GetOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
glog.V(3).Infof("Failed to get Configmap for Job <%s/%s>: %v",
job.Namespace, job.Name, err)
return err
} else {
return nil
}
}
if err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Delete(cmName, nil); err != nil {
if !apierrors.IsNotFound(err) {
glog.Errorf("Failed to delete Configmap of Job %v/%v: %v",
job.Namespace, job.Name, err)
return err
}
}
return nil
}


@@ -38,9 +38,9 @@ var _ = Describe("MPI E2E Test", func() {
},
},
plugins: map[string][]string{
"ssh": []string{},
"env": []string{},
"svc": []string{},
"ssh": {},
"env": {},
"svc": {},
},
tasks: []taskSpec{
{


@@ -1 +0,0 @@
test

354
vendor/github.com/hashicorp/errwrap/LICENSE generated vendored Normal file

@@ -0,0 +1,354 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients' rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipient's
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party's negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party's ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

89
vendor/github.com/hashicorp/errwrap/README.md generated vendored Normal file

@@ -0,0 +1,89 @@
# errwrap
`errwrap` is a package for Go that formalizes the pattern of wrapping errors
and checking if an error contains another error.
There is a common pattern in Go of taking a returned `error` value and
then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
with this pattern is that you completely lose the original `error` structure.
Arguably the _correct_ approach is that you should make a custom structure
implementing the `error` interface, and have the original error as a field
on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
This is a good approach, but you have to know the entire chain of possible
rewrapping that happens, when you might just care about one.
`errwrap` formalizes this pattern (it doesn't matter what approach you use
above) by giving a single interface for wrapping errors, checking if a specific
error is wrapped, and extracting that error.
## Installation and Docs
Install using `go get github.com/hashicorp/errwrap`.
Full documentation is available at
http://godoc.org/github.com/hashicorp/errwrap
## Usage
#### Basic Usage
Below is a very basic example of its usage:
```go
// A function that always returns an error, but wraps it, like a real
// function might.
func tryOpen() error {
_, err := os.Open("/i/dont/exist")
if err != nil {
return errwrap.Wrapf("Doesn't exist: {{err}}", err)
}
return nil
}
func main() {
err := tryOpen()
// We can use the Contains helpers to check if an error contains
// another error. It is safe to do this with a nil error, or with
// an error that doesn't even use the errwrap package.
if errwrap.Contains(err, "does not exist") {
// Do something
}
if errwrap.ContainsType(err, new(os.PathError)) {
// Do something
}
// Or we can use the associated `Get` functions to just extract
// a specific error. This would return nil if that specific error doesn't
// exist.
perr := errwrap.GetType(err, new(os.PathError))
}
```
#### Custom Types
If you're already making custom types that properly wrap errors, then
you can get all the functionality of `errwrap.Contains` and such by
implementing the `Wrapper` interface with just one function. Example:
```go
type AppError struct {
Code ErrorCode
Err error
}
func (e *AppError) WrappedErrors() []error {
return []error{e.Err}
}
```
Now this works:
```go
err := &AppError{Err: fmt.Errorf("an error")}
if errwrap.ContainsType(err, fmt.Errorf("")) {
// This will work!
}
```

169
vendor/github.com/hashicorp/errwrap/errwrap.go generated vendored Normal file

@@ -0,0 +1,169 @@
// Package errwrap implements methods to formalize error wrapping in Go.
//
// All of the top-level functions that take an `error` are built to be able
// to take any error, not just wrapped errors. This allows you to use errwrap
// without having to type-check and type-cast everywhere.
package errwrap
import (
"errors"
"reflect"
"strings"
)
// WalkFunc is the callback called for Walk.
type WalkFunc func(error)
// Wrapper is an interface that can be implemented by custom types to
// have all the Contains, Get, etc. functions in errwrap work.
//
// When Walk reaches a Wrapper, it will call the callback for every
// wrapped error in addition to the wrapper itself. Since all the top-level
// functions in errwrap use Walk, this means that all those functions work
// with your custom type.
type Wrapper interface {
WrappedErrors() []error
}
// Wrap defines that outer wraps inner, returning an error type that
// can be cleanly used with the other methods in this package, such as
// Contains, GetAll, etc.
//
// This function won't modify the error message at all (the outer message
// will be used).
func Wrap(outer, inner error) error {
return &wrappedError{
Outer: outer,
Inner: inner,
}
}
// Wrapf wraps an error with a formatting message. This is similar to using
// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
// errors, you should replace it with this.
//
// format is the format of the error message. The string '{{err}}' will
// be replaced with the original error message.
func Wrapf(format string, err error) error {
outerMsg := "<nil>"
if err != nil {
outerMsg = err.Error()
}
outer := errors.New(strings.Replace(
format, "{{err}}", outerMsg, -1))
return Wrap(outer, err)
}
// Contains checks if the given error contains an error with the
// message msg. If err is not a wrapped error, this will always return
// false unless the error itself happens to match this msg.
func Contains(err error, msg string) bool {
return len(GetAll(err, msg)) > 0
}
// ContainsType checks if the given error contains an error with
// the same concrete type as v. If err is not a wrapped error, this will
// check the err itself.
func ContainsType(err error, v interface{}) bool {
return len(GetAllType(err, v)) > 0
}
// Get is the same as GetAll but returns the deepest matching error.
func Get(err error, msg string) error {
es := GetAll(err, msg)
if len(es) > 0 {
return es[len(es)-1]
}
return nil
}
// GetType is the same as GetAllType but returns the deepest matching error.
func GetType(err error, v interface{}) error {
es := GetAllType(err, v)
if len(es) > 0 {
return es[len(es)-1]
}
return nil
}
// GetAll gets all the errors that might be wrapped in err with the
// given message. The order of the errors is such that the outermost
// matching error (the most recent wrap) is index zero, and so on.
func GetAll(err error, msg string) []error {
var result []error
Walk(err, func(err error) {
if err.Error() == msg {
result = append(result, err)
}
})
return result
}
// GetAllType gets all the errors that are the same type as v.
//
// The order of the return value is the same as described in GetAll.
func GetAllType(err error, v interface{}) []error {
var result []error
var search string
if v != nil {
search = reflect.TypeOf(v).String()
}
Walk(err, func(err error) {
var needle string
if err != nil {
needle = reflect.TypeOf(err).String()
}
if needle == search {
result = append(result, err)
}
})
return result
}
// Walk walks all the wrapped errors in err and calls the callback. If
// err isn't a wrapped error, this will be called once for err. If err
// is a wrapped error, the callback will be called for both the wrapper
// that implements error as well as the wrapped error itself.
func Walk(err error, cb WalkFunc) {
if err == nil {
return
}
switch e := err.(type) {
case *wrappedError:
cb(e.Outer)
Walk(e.Inner, cb)
case Wrapper:
cb(err)
for _, err := range e.WrappedErrors() {
Walk(err, cb)
}
default:
cb(err)
}
}
// wrappedError is an implementation of error that has both the
// outer and inner errors.
type wrappedError struct {
Outer error
Inner error
}
func (w *wrappedError) Error() string {
return w.Outer.Error()
}
func (w *wrappedError) WrappedErrors() []error {
return []error{w.Outer, w.Inner}
}

1
vendor/github.com/hashicorp/errwrap/go.mod generated vendored Normal file

@@ -0,0 +1 @@
module github.com/hashicorp/errwrap

12
vendor/github.com/hashicorp/go-multierror/.travis.yml generated vendored Normal file

@@ -0,0 +1,12 @@
sudo: false
language: go
go:
- 1.x
branches:
only:
- master
script: make test testrace

353
vendor/github.com/hashicorp/go-multierror/LICENSE generated vendored Normal file

@@ -0,0 +1,353 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients' rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipient's
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party's negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party's ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

31
vendor/github.com/hashicorp/go-multierror/Makefile generated vendored Normal file

@@ -0,0 +1,31 @@
TEST?=./...
default: test
# test runs the test suite and vets the code.
test: generate
@echo "==> Running tests..."
@go list $(TEST) \
| grep -v "/vendor/" \
| xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
# testrace runs the race checker
testrace: generate
@echo "==> Running tests (race)..."
@go list $(TEST) \
| grep -v "/vendor/" \
| xargs -n1 go test -timeout=60s -race ${TESTARGS}
# updatedeps installs all the dependencies needed to run and build.
updatedeps:
@sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
# generate runs `go generate` to build the dynamically generated source files.
generate:
@echo "==> Generating..."
@find . -type f -name '.DS_Store' -delete
@go list ./... \
| grep -v "/vendor/" \
| xargs -n1 go generate
.PHONY: default test testrace updatedeps generate

97
vendor/github.com/hashicorp/go-multierror/README.md generated vendored Normal file

@ -0,0 +1,97 @@
# go-multierror
[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis]
[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
[travis]: https://travis-ci.org/hashicorp/go-multierror
[godocs]: https://godoc.org/github.com/hashicorp/go-multierror
`go-multierror` is a package for Go that provides a mechanism for
representing a list of `error` values as a single `error`.
This allows a function in Go to return an `error` that might actually
be a list of errors. If the caller knows this, they can unwrap the
list and access the errors. If the caller doesn't know, the error
still formats itself into a readable, human-friendly message.
`go-multierror` implements the
[errwrap](https://github.com/hashicorp/errwrap) interface so that it can
be used with that library, as well.
## Installation and Docs
Install using `go get github.com/hashicorp/go-multierror`.
Full documentation is available at
http://godoc.org/github.com/hashicorp/go-multierror
## Usage
go-multierror is easy to use and purposely built to be unobtrusive in
existing Go applications/libraries that may not be aware of it.
**Building a list of errors**
The `Append` function is used to create a list of errors. This function
behaves a lot like the Go built-in `append` function: it doesn't matter
if the first argument is nil, a `multierror.Error`, or any other `error`,
the function behaves as you would expect.
```go
var result error
if err := step1(); err != nil {
result = multierror.Append(result, err)
}
if err := step2(); err != nil {
result = multierror.Append(result, err)
}
return result
```
**Customizing the formatting of the errors**
By specifying a custom `ErrorFormat`, you can customize the format
of the `Error() string` function:
```go
var result *multierror.Error
// ... accumulate errors here, maybe using Append
if result != nil {
result.ErrorFormat = func([]error) string {
return "errors!"
}
}
```
**Accessing the list of errors**
`multierror.Error` implements `error` so if the caller doesn't know about
multierror, it will work just fine. But if you're aware a multierror might
be returned, you can use type switches to access the list of errors:
```go
if err := something(); err != nil {
if merr, ok := err.(*multierror.Error); ok {
// Use merr.Errors
}
}
```
**Returning a multierror only if there are errors**
If you build a `multierror.Error`, you can use the `ErrorOrNil` function
to return an `error` implementation only if there are errors to return:
```go
var result *multierror.Error
// ... accumulate errors here
// Return the `error` only if errors were added to the multierror, otherwise
// return nil since there are no errors.
return result.ErrorOrNil()
```

41
vendor/github.com/hashicorp/go-multierror/append.go generated vendored Normal file

@ -0,0 +1,41 @@
package multierror
// Append is a helper function that will append more errors
// onto an Error in order to create a larger multi-error.
//
// If err is not a multierror.Error, then it will be turned into
// one. If any of the errs are multierror.Error, they will be flattened
// one level into err.
func Append(err error, errs ...error) *Error {
switch err := err.(type) {
case *Error:
// Typed nils can reach here, so initialize if we are nil
if err == nil {
err = new(Error)
}
// Go through each error and flatten
for _, e := range errs {
switch e := e.(type) {
case *Error:
if e != nil {
err.Errors = append(err.Errors, e.Errors...)
}
default:
if e != nil {
err.Errors = append(err.Errors, e)
}
}
}
return err
default:
newErrs := make([]error, 0, len(errs)+1)
if err != nil {
newErrs = append(newErrs, err)
}
newErrs = append(newErrs, errs...)
return Append(&Error{}, newErrs...)
}
}

26
vendor/github.com/hashicorp/go-multierror/flatten.go generated vendored Normal file

@ -0,0 +1,26 @@
package multierror
// Flatten flattens the given error, merging any *Errors together into
// a single *Error.
func Flatten(err error) error {
// If it isn't an *Error, just return the error as-is
if _, ok := err.(*Error); !ok {
return err
}
// Otherwise, make the result and flatten away!
flatErr := new(Error)
flatten(err, flatErr)
return flatErr
}
func flatten(err error, flatErr *Error) {
switch err := err.(type) {
case *Error:
for _, e := range err.Errors {
flatten(e, flatErr)
}
default:
flatErr.Errors = append(flatErr.Errors, err)
}
}
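As a hedged illustration of what `Flatten` does (the nested `*Error` below is built by hand, since `Append` already flattens one level on its own):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	// Build a nested *Error by hand: outer contains inner as one element.
	inner := &multierror.Error{Errors: []error{errors.New("a"), errors.New("b")}}
	outer := &multierror.Error{Errors: []error{errors.New("c"), inner}}

	// Flatten merges the nested *Error into a single flat *Error,
	// so all three errors end up at one level.
	flat := multierror.Flatten(outer)
	fmt.Println(flat) // "3 errors occurred: ..." via ListFormatFunc
}
```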

27
vendor/github.com/hashicorp/go-multierror/format.go generated vendored Normal file

@ -0,0 +1,27 @@
package multierror
import (
"fmt"
"strings"
)
// ErrorFormatFunc is a function callback that is called by Error to
// turn the list of errors into a string.
type ErrorFormatFunc func([]error) string
// ListFormatFunc is a basic formatter that outputs the number of errors
// that occurred along with a bullet point list of the errors.
func ListFormatFunc(es []error) string {
if len(es) == 1 {
return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
}
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %s", err)
}
return fmt.Sprintf(
"%d errors occurred:\n\t%s\n\n",
len(es), strings.Join(points, "\n\t"))
}

3
vendor/github.com/hashicorp/go-multierror/go.mod generated vendored Normal file

@ -0,0 +1,3 @@
module github.com/hashicorp/go-multierror
require github.com/hashicorp/errwrap v1.0.0

4
vendor/github.com/hashicorp/go-multierror/go.sum generated vendored Normal file

@ -0,0 +1,4 @@
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=


@ -0,0 +1,51 @@
package multierror
import (
"fmt"
)
// Error is an error type that tracks multiple errors. It is used to
// accumulate errors in cases where several may occur and return them
// as a single "error".
type Error struct {
Errors []error
ErrorFormat ErrorFormatFunc
}
func (e *Error) Error() string {
fn := e.ErrorFormat
if fn == nil {
fn = ListFormatFunc
}
return fn(e.Errors)
}
// ErrorOrNil returns an error interface if this Error represents
// a list of errors, or returns nil if the list of errors is empty. This
// function is useful at the end of accumulation to make sure that the value
// returned represents the existence of errors.
func (e *Error) ErrorOrNil() error {
if e == nil {
return nil
}
if len(e.Errors) == 0 {
return nil
}
return e
}
func (e *Error) GoString() string {
return fmt.Sprintf("*%#v", *e)
}
// WrappedErrors returns the list of errors that this Error is wrapping.
// It is an implementation of the errwrap.Wrapper interface so that
// multierror.Error can be used with that library.
//
// This method is not safe to be called concurrently and is no different
// than accessing the Errors field directly. It is implemented only to
// satisfy the errwrap.Wrapper interface.
func (e *Error) WrappedErrors() []error {
return e.Errors
}

37
vendor/github.com/hashicorp/go-multierror/prefix.go generated vendored Normal file

@ -0,0 +1,37 @@
package multierror
import (
"fmt"
"github.com/hashicorp/errwrap"
)
// Prefix is a helper function that will prefix some text
// to the given error. If the error is a multierror.Error, then
// it will be prefixed to each wrapped error.
//
// This is useful to use when appending multiple multierrors
// together in order to give better scoping.
func Prefix(err error, prefix string) error {
if err == nil {
return nil
}
format := fmt.Sprintf("%s {{err}}", prefix)
switch err := err.(type) {
case *Error:
// Typed nils can reach here, so initialize if we are nil
if err == nil {
err = new(Error)
}
// Wrap each of the errors
for i, e := range err.Errors {
err.Errors[i] = errwrap.Wrapf(format, e)
}
return err
default:
return errwrap.Wrapf(format, err)
}
}
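A short sketch, under the assumption that errors are being collected per subsystem, of how `Prefix` scopes each wrapped error (the subsystem name is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var result *multierror.Error
	result = multierror.Append(result, errors.New("connection refused"))
	result = multierror.Append(result, errors.New("timeout"))

	// For a *multierror.Error, the prefix is applied to every wrapped
	// error rather than to the multierror as a whole.
	scoped := multierror.Prefix(result, "storage:")
	fmt.Println(scoped)
}
```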

16
vendor/github.com/hashicorp/go-multierror/sort.go generated vendored Normal file

@ -0,0 +1,16 @@
package multierror
// Len implements sort.Interface function for length
func (err Error) Len() int {
return len(err.Errors)
}
// Swap implements sort.Interface function for swapping elements
func (err Error) Swap(i, j int) {
err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
}
// Less implements sort.Interface function for determining order
func (err Error) Less(i, j int) bool {
return err.Errors[i].Error() < err.Errors[j].Error()
}
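Since `Error` satisfies `sort.Interface`, the wrapped errors can be ordered with the standard library; a minimal sketch:

```go
package main

import (
	"errors"
	"fmt"
	"sort"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var result *multierror.Error
	result = multierror.Append(result, errors.New("b"), errors.New("c"), errors.New("a"))

	// Error's value-receiver Len/Swap/Less methods are in *Error's method
	// set, so a non-nil *Error can be passed straight to sort.Sort.
	sort.Sort(result)
	fmt.Println(result) // errors listed in lexical order: a, b, c
}
```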

201
vendor/github.com/kubernetes-sigs/kube-batch/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,88 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"time"
"github.com/spf13/pflag"
)
const (
defaultSchedulerName = "kube-batch"
defaultSchedulerPeriod = time.Second
defaultQueue = "default"
defaultListenAddress = ":8080"
)
// ServerOption is the main context object for the controller manager.
type ServerOption struct {
Master string
Kubeconfig string
SchedulerName string
SchedulerConf string
SchedulePeriod time.Duration
EnableLeaderElection bool
LockObjectNamespace string
DefaultQueue string
PrintVersion bool
ListenAddress string
EnablePriorityClass bool
}
// ServerOpts server options
var ServerOpts *ServerOption
// NewServerOption creates a new CMServer with a default config.
func NewServerOption() *ServerOption {
s := ServerOption{}
return &s
}
// AddFlags adds flags for a specific CMServer to the specified FlagSet
func (s *ServerOption) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information")
// kube-batch will ignore pods with scheduler names other than specified with the option
fs.StringVar(&s.SchedulerName, "scheduler-name", defaultSchedulerName, "kube-batch will handle pods whose .spec.SchedulerName is the same as the scheduler-name")
fs.StringVar(&s.SchedulerConf, "scheduler-conf", "", "The absolute path of scheduler configuration file")
fs.DurationVar(&s.SchedulePeriod, "schedule-period", defaultSchedulerPeriod, "The period between each scheduling cycle")
fs.StringVar(&s.DefaultQueue, "default-queue", defaultQueue, "The default queue name of the job")
fs.BoolVar(&s.EnableLeaderElection, "leader-elect", s.EnableLeaderElection,
"Start a leader election client and gain leadership before "+
"executing the main loop. Enable this when running replicated kube-batch for high availability")
fs.BoolVar(&s.PrintVersion, "version", false, "Show version and quit")
fs.StringVar(&s.LockObjectNamespace, "lock-object-namespace", s.LockObjectNamespace, "Define the namespace of the lock object that is used for leader election")
fs.StringVar(&s.ListenAddress, "listen-address", defaultListenAddress, "The address to listen on for HTTP requests.")
fs.BoolVar(&s.EnablePriorityClass, "priority-class", true,
"Enable PriorityClass to provide the capacity of preemption at pod group level; to disable it, set it false")
}
// CheckOptionOrDie check lock-object-namespace when LeaderElection is enabled
func (s *ServerOption) CheckOptionOrDie() error {
if s.EnableLeaderElection && s.LockObjectNamespace == "" {
return fmt.Errorf("lock-object-namespace must not be nil when LeaderElection is enabled")
}
return nil
}
// RegisterOptions registers options
func (s *ServerOption) RegisterOptions() {
ServerOpts = s
}
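A hedged sketch of how these options might be wired into a `main`; the flag-set name and error handling here are assumptions for illustration, not taken from kube-batch itself:

```go
package main

import (
	"fmt"
	"os"

	"github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options"
	"github.com/spf13/pflag"
)

func main() {
	s := options.NewServerOption()

	// Register the scheduler flags on a flag set and parse the CLI args.
	fs := pflag.NewFlagSet("kube-batch", pflag.ExitOnError)
	s.AddFlags(fs)
	fs.Parse(os.Args[1:])

	// Reject --leader-elect without --lock-object-namespace up front.
	if err := s.CheckOptionOrDie(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	s.RegisterOptions()
}
```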


@ -0,0 +1,140 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"fmt"
"net/http"
"os"
"time"
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler"
"github.com/kubernetes-sigs/kube-batch/pkg/version"
"github.com/prometheus/client_golang/prometheus/promhttp"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
// Register gcp auth
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
)
const (
leaseDuration = 15 * time.Second
renewDeadline = 10 * time.Second
retryPeriod = 5 * time.Second
apiVersion = "v1alpha1"
)
func buildConfig(master, kubeconfig string) (*rest.Config, error) {
if master != "" || kubeconfig != "" {
return clientcmd.BuildConfigFromFlags(master, kubeconfig)
}
return rest.InClusterConfig()
}
// Run starts the kube-batch scheduler.
func Run(opt *options.ServerOption) error {
if opt.PrintVersion {
version.PrintVersionAndExit(apiVersion)
}
config, err := buildConfig(opt.Master, opt.Kubeconfig)
if err != nil {
return err
}
// Start policy controller to allocate resources.
sched, err := scheduler.NewScheduler(config,
opt.SchedulerName,
opt.SchedulerConf,
opt.SchedulePeriod,
opt.DefaultQueue)
if err != nil {
panic(err)
}
go func() {
http.Handle("/metrics", promhttp.Handler())
glog.Fatalf("Prometheus Http Server failed %s", http.ListenAndServe(opt.ListenAddress, nil))
}()
run := func(ctx context.Context) {
sched.Run(ctx.Done())
<-ctx.Done()
}
if !opt.EnableLeaderElection {
run(context.TODO())
return fmt.Errorf("finished without leader elect")
}
leaderElectionClient, err := clientset.NewForConfig(restclient.AddUserAgent(config, "leader-election"))
if err != nil {
return err
}
// Prepare event clients.
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: leaderElectionClient.CoreV1().Events(opt.LockObjectNamespace)})
eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kube-batch"})
hostname, err := os.Hostname()
if err != nil {
return fmt.Errorf("unable to get hostname: %v", err)
}
// add a uniquifier so that two processes on the same host don't accidentally both become active
id := hostname + "_" + string(uuid.NewUUID())
rl, err := resourcelock.New(resourcelock.ConfigMapsResourceLock,
opt.LockObjectNamespace,
"kube-batch",
leaderElectionClient.CoreV1(),
resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: eventRecorder,
})
if err != nil {
return fmt.Errorf("couldn't create resource lock: %v", err)
}
leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: leaseDuration,
RenewDeadline: renewDeadline,
RetryPeriod: retryPeriod,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: run,
OnStoppedLeading: func() {
glog.Fatalf("leaderelection lost")
},
},
})
return fmt.Errorf("lost lease")
}


@ -0,0 +1,18 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package v1alpha1


@ -0,0 +1,21 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// GroupNameAnnotationKey is the annotation key on a Pod that identifies
// which PodGroup it belongs to.
const GroupNameAnnotationKey = "scheduling.k8s.io/group-name"
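For illustration only, a small sketch of reading this annotation off a Pod to discover its group (the pod and group names are made up):

```go
package main

import (
	"fmt"

	"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A hand-built Pod annotated with the name of its PodGroup.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "worker-0",
			Annotations: map[string]string{v1alpha1.GroupNameAnnotationKey: "my-group"},
		},
	}
	fmt.Println(pod.Annotations[v1alpha1.GroupNameAnnotationKey]) // my-group
}
```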


@ -0,0 +1,57 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
const (
// GroupName is the group name used in this package.
GroupName = "scheduling.incubator.k8s.io"
// GroupVersion is the version of the scheduling group
GroupVersion = "v1alpha1"
)
// SchemeGroupVersion is the group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource.
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
// addKnownTypes adds the set of types defined in this package to the supplied scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&PodGroup{},
&PodGroupList{},
&Queue{},
&QueueList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}


@ -0,0 +1,222 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PodGroupPhase is the phase of a pod group at the current time.
type PodGroupPhase string
// These are the valid phase of podGroups.
const (
// PodGroupPending means the pod group has been accepted by the system, but the
// scheduler cannot allocate enough resources to it.
PodGroupPending PodGroupPhase = "Pending"
// PodGroupRunning means at least `spec.minMember` pods of the PodGroup are in the running phase.
PodGroupRunning PodGroupPhase = "Running"
// PodGroupUnknown means some of the `spec.minMember` pods are running but the others cannot
// be scheduled, e.g. due to insufficient resources; the scheduler will wait for the
// related controller to recover it.
PodGroupUnknown PodGroupPhase = "Unknown"
// PodGroupInqueue means controllers can start to create pods;
// it is a new state between PodGroupPending and PodGroupRunning.
PodGroupInqueue PodGroupPhase = "Inqueue"
)
type PodGroupConditionType string
const (
PodGroupUnschedulableType PodGroupConditionType = "Unschedulable"
)
// PodGroupCondition contains details for the current state of this pod group.
type PodGroupCondition struct {
// Type is the type of the condition
Type PodGroupConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
// Status is the status of the condition.
Status v1.ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
// The ID of condition transition.
TransitionID string `json:"transitionID,omitempty" protobuf:"bytes,3,opt,name=transitionID"`
// Last time the phase transitioned from another to current phase.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// Unique, one-word, CamelCase reason for the phase's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// Human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
const (
// PodFailedReason is set if a pod of the PodGroup failed
PodFailedReason string = "PodFailed"
// PodDeletedReason is set if a pod of the PodGroup was deleted
PodDeletedReason string = "PodDeleted"
// NotEnoughResourcesReason is set if there are not enough resources to schedule all pods
NotEnoughResourcesReason string = "NotEnoughResources"
// NotEnoughPodsReason is set if there are not enough tasks compared to `spec.minMember`
NotEnoughPodsReason string = "NotEnoughTasks"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodGroup is a collection of Pods; it is used for batch workloads.
type PodGroup struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the pod group.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec PodGroupSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status represents the current information about a pod group.
// This data may not be up to date.
// +optional
Status PodGroupStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PodGroupSpec represents the template of a pod group.
type PodGroupSpec struct {
// MinMember defines the minimal number of members/tasks needed to run the pod group;
// if there are not enough resources to start all tasks, the scheduler
// will not start any of them.
MinMember int32 `json:"minMember,omitempty" protobuf:"bytes,1,opt,name=minMember"`
// Queue defines the queue to allocate resource for PodGroup; if queue does not exist,
// the PodGroup will not be scheduled.
Queue string `json:"queue,omitempty" protobuf:"bytes,2,opt,name=queue"`
// If specified, indicates the PodGroup's priority. "system-node-critical" and
// "system-cluster-critical" are two special keywords which indicate the
// highest priorities with the former being the highest priority. Any other
// name must be defined by creating a PriorityClass object with that name.
// If not specified, the PodGroup priority will be default or zero if there is no
// default.
// +optional
PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,3,opt,name=priorityClassName"`
// MinResources defines the minimal resources needed by the members/tasks of the pod group;
// if there are not enough resources to start all tasks, the scheduler
// will not start any of them.
MinResources *v1.ResourceList `json:"minResources,omitempty" protobuf:"bytes,4,opt,name=minResources"`
}
// PodGroupStatus represents the current state of a pod group.
type PodGroupStatus struct {
// Current phase of PodGroup.
Phase PodGroupPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase"`
// The conditions of PodGroup.
// +optional
Conditions []PodGroupCondition `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"`
// The number of actively running pods.
// +optional
Running int32 `json:"running,omitempty" protobuf:"bytes,3,opt,name=running"`
// The number of pods which reached phase Succeeded.
// +optional
Succeeded int32 `json:"succeeded,omitempty" protobuf:"bytes,4,opt,name=succeeded"`
// The number of pods which reached phase Failed.
// +optional
Failed int32 `json:"failed,omitempty" protobuf:"bytes,5,opt,name=failed"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodGroupList is a collection of pod groups.
type PodGroupList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of PodGroups
Items []PodGroup `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Queue is a queue of PodGroup.
type Queue struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the queue.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec QueueSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// The status of queue.
// +optional
Status QueueStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// QueueStatus represents the status of Queue.
type QueueStatus struct {
// The number of 'Unknown' PodGroup in this queue.
Unknown int32 `json:"unknown,omitempty" protobuf:"bytes,1,opt,name=unknown"`
// The number of 'Pending' PodGroup in this queue.
Pending int32 `json:"pending,omitempty" protobuf:"bytes,2,opt,name=pending"`
// The number of 'Running' PodGroup in this queue.
Running int32 `json:"running,omitempty" protobuf:"bytes,3,opt,name=running"`
}
// QueueSpec represents the template of Queue.
type QueueSpec struct {
Weight int32 `json:"weight,omitempty" protobuf:"bytes,1,opt,name=weight"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// QueueList is a collection of queues.
type QueueList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of Queues
Items []Queue `json:"items" protobuf:"bytes,2,rep,name=items"`
}
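A minimal sketch of constructing one of these objects in Go; the name, namespace, and sizes are placeholders, not values from the project:

```go
package main

import (
	"fmt"

	"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A PodGroup that asks the scheduler to gang-schedule at least
	// three members out of the "default" queue.
	pg := &v1alpha1.PodGroup{
		ObjectMeta: metav1.ObjectMeta{Name: "my-group", Namespace: "default"},
		Spec: v1alpha1.PodGroupSpec{
			MinMember: 3,
			Queue:     "default",
		},
	}
	fmt.Printf("%s/%s minMember=%d\n", pg.Namespace, pg.Name, pg.Spec.MinMember)
}
```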


@ -0,0 +1,248 @@
// +build !ignore_autogenerated
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodGroup) DeepCopyInto(out *PodGroup) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroup.
func (in *PodGroup) DeepCopy() *PodGroup {
if in == nil {
return nil
}
out := new(PodGroup)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodGroup) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodGroupCondition) DeepCopyInto(out *PodGroupCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupCondition.
func (in *PodGroupCondition) DeepCopy() *PodGroupCondition {
if in == nil {
return nil
}
out := new(PodGroupCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodGroupList) DeepCopyInto(out *PodGroupList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodGroup, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupList.
func (in *PodGroupList) DeepCopy() *PodGroupList {
if in == nil {
return nil
}
out := new(PodGroupList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodGroupList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodGroupSpec) DeepCopyInto(out *PodGroupSpec) {
*out = *in
if in.MinResources != nil {
in, out := &in.MinResources, &out.MinResources
*out = new(v1.ResourceList)
if **in != nil {
in, out := *in, *out
*out = make(map[v1.ResourceName]resource.Quantity, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupSpec.
func (in *PodGroupSpec) DeepCopy() *PodGroupSpec {
if in == nil {
return nil
}
out := new(PodGroupSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodGroupStatus) DeepCopyInto(out *PodGroupStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]PodGroupCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupStatus.
func (in *PodGroupStatus) DeepCopy() *PodGroupStatus {
if in == nil {
return nil
}
out := new(PodGroupStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Queue) DeepCopyInto(out *Queue) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue.
func (in *Queue) DeepCopy() *Queue {
if in == nil {
return nil
}
out := new(Queue)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Queue) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QueueList) DeepCopyInto(out *QueueList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Queue, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList.
func (in *QueueList) DeepCopy() *QueueList {
if in == nil {
return nil
}
out := new(QueueList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *QueueList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QueueSpec) DeepCopyInto(out *QueueSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec.
func (in *QueueSpec) DeepCopy() *QueueSpec {
if in == nil {
return nil
}
out := new(QueueSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QueueStatus) DeepCopyInto(out *QueueStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus.
func (in *QueueStatus) DeepCopy() *QueueStatus {
if in == nil {
return nil
}
out := new(QueueStatus)
in.DeepCopyInto(out)
return out
}


@ -0,0 +1,37 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
func GetController(obj interface{}) types.UID {
accessor, err := meta.Accessor(obj)
if err != nil {
return ""
}
controllerRef := metav1.GetControllerOf(accessor)
if controllerRef != nil {
return controllerRef.UID
}
return ""
}
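A small hedged sketch of what `GetController` returns for a pod owned by a controller; the owner reference here is fabricated for the example:

```go
package main

import (
	"fmt"

	"github.com/kubernetes-sigs/kube-batch/pkg/apis/utils"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	isController := true
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "worker-0",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1234", Controller: &isController},
			},
		},
	}
	// Returns the UID of the owning controller, or "" if there is none.
	fmt.Println(utils.GetController(pod)) // 1234
}
```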


@ -0,0 +1,98 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
type Interface interface {
Discovery() discovery.DiscoveryInterface
SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface
// Deprecated: please explicitly pick a version if possible.
Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client
}
// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client
func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface {
return c.schedulingV1alpha1
}
// Deprecated: Scheduling retrieves the default version of SchedulingClient.
// Please explicitly pick a version.
func (c *Clientset) Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface {
return c.schedulingV1alpha1
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
return nil
}
return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.schedulingV1alpha1 = schedulingv1alpha1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
}
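A hedged sketch of using this clientset to list PodGroups; the kubeconfig path and namespace are placeholders:

```go
package main

import (
	"fmt"

	versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig on disk (placeholder path).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := versioned.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// List PodGroups in the "default" namespace through the typed client.
	pgs, err := cs.SchedulingV1alpha1().PodGroups("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, pg := range pgs.Items {
		fmt.Println(pg.Name)
	}
}
```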


@ -0,0 +1,20 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated clientset.
package versioned


@ -0,0 +1,20 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package contains the scheme of the automatically generated clientset.
package scheme


@ -0,0 +1,56 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
schedulingv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(Scheme))
}


@ -0,0 +1,20 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1


@ -0,0 +1,23 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
type PodGroupExpansion interface{}
type QueueExpansion interface{}


@ -0,0 +1,191 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"time"
v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
scheme "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// PodGroupsGetter has a method to return a PodGroupInterface.
// A group's client should implement this interface.
type PodGroupsGetter interface {
PodGroups(namespace string) PodGroupInterface
}
// PodGroupInterface has methods to work with PodGroup resources.
type PodGroupInterface interface {
Create(*v1alpha1.PodGroup) (*v1alpha1.PodGroup, error)
Update(*v1alpha1.PodGroup) (*v1alpha1.PodGroup, error)
UpdateStatus(*v1alpha1.PodGroup) (*v1alpha1.PodGroup, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha1.PodGroup, error)
List(opts v1.ListOptions) (*v1alpha1.PodGroupList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodGroup, err error)
PodGroupExpansion
}
// podGroups implements PodGroupInterface
type podGroups struct {
client rest.Interface
ns string
}
// newPodGroups returns a PodGroups
func newPodGroups(c *SchedulingV1alpha1Client, namespace string) *podGroups {
return &podGroups{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the podGroup, and returns the corresponding podGroup object, and an error if there is any.
func (c *podGroups) Get(name string, options v1.GetOptions) (result *v1alpha1.PodGroup, err error) {
result = &v1alpha1.PodGroup{}
err = c.client.Get().
Namespace(c.ns).
Resource("podgroups").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of PodGroups that match those selectors.
func (c *podGroups) List(opts v1.ListOptions) (result *v1alpha1.PodGroupList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.PodGroupList{}
err = c.client.Get().
Namespace(c.ns).
Resource("podgroups").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested podGroups.
func (c *podGroups) Watch(opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("podgroups").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
// Create takes the representation of a podGroup and creates it. Returns the server's representation of the podGroup, and an error, if there is any.
func (c *podGroups) Create(podGroup *v1alpha1.PodGroup) (result *v1alpha1.PodGroup, err error) {
result = &v1alpha1.PodGroup{}
err = c.client.Post().
Namespace(c.ns).
Resource("podgroups").
Body(podGroup).
Do().
Into(result)
return
}
// Update takes the representation of a podGroup and updates it. Returns the server's representation of the podGroup, and an error, if there is any.
func (c *podGroups) Update(podGroup *v1alpha1.PodGroup) (result *v1alpha1.PodGroup, err error) {
result = &v1alpha1.PodGroup{}
err = c.client.Put().
Namespace(c.ns).
Resource("podgroups").
Name(podGroup.Name).
Body(podGroup).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *podGroups) UpdateStatus(podGroup *v1alpha1.PodGroup) (result *v1alpha1.PodGroup, err error) {
result = &v1alpha1.PodGroup{}
err = c.client.Put().
Namespace(c.ns).
Resource("podgroups").
Name(podGroup.Name).
SubResource("status").
Body(podGroup).
Do().
Into(result)
return
}
// Delete takes name of the podGroup and deletes it. Returns an error if one occurs.
func (c *podGroups) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("podgroups").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *podGroups) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("podgroups").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched podGroup.
func (c *podGroups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodGroup, err error) {
result = &v1alpha1.PodGroup{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("podgroups").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
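
A minimal consumer sketch showing how the generated PodGroupInterface above is typically reached through the versioned clientset; the kubeconfig path, namespace, and PodGroup values are assumptions for illustration, not part of the generated file.

package main

import (
	"fmt"

	v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a local kubeconfig (path is an assumption).
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	// The versioned clientset wires up the typed client defined above.
	client, err := versioned.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	pg := &v1alpha1.PodGroup{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pg"},
		Spec:       v1alpha1.PodGroupSpec{MinMember: 2},
	}
	// Create issues a POST against .../namespaces/default/podgroups.
	created, err := client.SchedulingV1alpha1().PodGroups("default").Create(pg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("created PodGroup %s\n", created.Name)
}

Update, Get, and Delete follow the same pattern against the interface shown above.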

View File

@ -0,0 +1,180 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"time"
v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
scheme "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// QueuesGetter has a method to return a QueueInterface.
// A group's client should implement this interface.
type QueuesGetter interface {
Queues() QueueInterface
}
// QueueInterface has methods to work with Queue resources.
type QueueInterface interface {
Create(*v1alpha1.Queue) (*v1alpha1.Queue, error)
Update(*v1alpha1.Queue) (*v1alpha1.Queue, error)
UpdateStatus(*v1alpha1.Queue) (*v1alpha1.Queue, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha1.Queue, error)
List(opts v1.ListOptions) (*v1alpha1.QueueList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Queue, err error)
QueueExpansion
}
// queues implements QueueInterface
type queues struct {
client rest.Interface
}
// newQueues returns a Queues
func newQueues(c *SchedulingV1alpha1Client) *queues {
return &queues{
client: c.RESTClient(),
}
}
// Get takes name of the queue, and returns the corresponding queue object, and an error if there is any.
func (c *queues) Get(name string, options v1.GetOptions) (result *v1alpha1.Queue, err error) {
result = &v1alpha1.Queue{}
err = c.client.Get().
Resource("queues").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of Queues that match those selectors.
func (c *queues) List(opts v1.ListOptions) (result *v1alpha1.QueueList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.QueueList{}
err = c.client.Get().
Resource("queues").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested queues.
func (c *queues) Watch(opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Resource("queues").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
// Create takes the representation of a queue and creates it. Returns the server's representation of the queue, and an error, if there is any.
func (c *queues) Create(queue *v1alpha1.Queue) (result *v1alpha1.Queue, err error) {
result = &v1alpha1.Queue{}
err = c.client.Post().
Resource("queues").
Body(queue).
Do().
Into(result)
return
}
// Update takes the representation of a queue and updates it. Returns the server's representation of the queue, and an error, if there is any.
func (c *queues) Update(queue *v1alpha1.Queue) (result *v1alpha1.Queue, err error) {
result = &v1alpha1.Queue{}
err = c.client.Put().
Resource("queues").
Name(queue.Name).
Body(queue).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *queues) UpdateStatus(queue *v1alpha1.Queue) (result *v1alpha1.Queue, err error) {
result = &v1alpha1.Queue{}
err = c.client.Put().
Resource("queues").
Name(queue.Name).
SubResource("status").
Body(queue).
Do().
Into(result)
return
}
// Delete takes name of the queue and deletes it. Returns an error if one occurs.
func (c *queues) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Resource("queues").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *queues) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Resource("queues").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched queue.
func (c *queues) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Queue, err error) {
result = &v1alpha1.Queue{}
err = c.client.Patch(pt).
Resource("queues").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
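
For the cluster-scoped Queue client, Patch is often the lightest way to adjust a single field. A hedged sketch (the helper name is hypothetical; it assumes a versioned clientset like the one built in the previous example, and a Queue spec with a weight field):

package example

import (
	"fmt"

	v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
	"k8s.io/apimachinery/pkg/types"
)

// bumpQueueWeight is a hypothetical helper: it raises a Queue's weight with
// a JSON merge patch rather than a full read-modify-write Update.
func bumpQueueWeight(client versioned.Interface, name string, weight int32) (*v1alpha1.Queue, error) {
	patch := []byte(fmt.Sprintf(`{"spec":{"weight":%d}}`, weight))
	return client.SchedulingV1alpha1().Queues().Patch(name, types.MergePatchType, patch)
}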

View File

@ -0,0 +1,95 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
)
type SchedulingV1alpha1Interface interface {
RESTClient() rest.Interface
PodGroupsGetter
QueuesGetter
}
// SchedulingV1alpha1Client is used to interact with features provided by the scheduling group.
type SchedulingV1alpha1Client struct {
restClient rest.Interface
}
func (c *SchedulingV1alpha1Client) PodGroups(namespace string) PodGroupInterface {
return newPodGroups(c, namespace)
}
func (c *SchedulingV1alpha1Client) Queues() QueueInterface {
return newQueues(c)
}
// NewForConfig creates a new SchedulingV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
return &SchedulingV1alpha1Client{client}, nil
}
// NewForConfigOrDie creates a new SchedulingV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *SchedulingV1alpha1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new SchedulingV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *SchedulingV1alpha1Client {
return &SchedulingV1alpha1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with the API server by this client implementation.
func (c *SchedulingV1alpha1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}
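
NewForConfigOrDie pairs naturally with in-cluster configuration when the caller itself runs as a pod. A sketch under that assumption (RBAC access to queues is presumed in place):

package main

import (
	"fmt"

	schedv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	// In-cluster config reads the pod's mounted service-account token.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := schedv1alpha1.NewForConfigOrDie(config)
	queues, err := client.Queues().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, q := range queues.Items {
		fmt.Printf("queue %s weight=%d\n", q.Name, q.Spec.Weight)
	}
}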

View File

@ -0,0 +1,180 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces"
scheduling "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// WaitForCacheSync waits for the caches of all started informers to be synced.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func() map[reflect.Type]cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Scheduling() scheduling.Interface
}
func (f *sharedInformerFactory) Scheduling() scheduling.Interface {
return scheduling.New(f, f.namespace, f.tweakListOptions)
}
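
A typical wiring of the factory above: build it once, request the informers you need, start everything, and wait for the initial sync before reading. The kubeconfig path, namespace, and resync period below are assumptions for illustration.

package main

import (
	"time"

	versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
	externalversions "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // assumed path
	if err != nil {
		panic(err)
	}
	client := versioned.NewForConfigOrDie(config)

	// 30s resync; WithNamespace narrows every informer to one namespace.
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client, 30*time.Second, externalversions.WithNamespace("default"))

	informer := factory.Scheduling().V1alpha1().PodGroups().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* react to new PodGroups */ },
	})

	stopCh := make(chan struct{})
	factory.Start(stopCh)            // runs each requested informer in a goroutine
	factory.WaitForCacheSync(stopCh) // blocks until the initial LIST completes
	time.Sleep(time.Minute)          // keep the process alive for the sketch
	close(stopCh)
}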

View File

@ -0,0 +1,64 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
"fmt"
v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// GenericInformer is a type of SharedIndexInformer that locates and delegates to other
// sharedInformers based on type
type GenericInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.GenericLister
}
type genericInformer struct {
informer cache.SharedIndexInformer
resource schema.GroupResource
}
// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() cache.SharedIndexInformer {
return f.informer
}
// Lister returns the GenericLister.
func (f *genericInformer) Lister() cache.GenericLister {
return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
}
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
// Group=scheduling, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("podgroups"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PodGroups().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("queues"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().Queues().Informer()}, nil
}
return nil, fmt.Errorf("no informer found for %v", resource)
}
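
ForResource is useful when the resource kind is only known at run time. A hedged sketch (the helper is hypothetical; in practice the GroupVersionResource could come from configuration rather than being hard-coded):

package example

import (
	"fmt"

	schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	externalversions "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
)

// listGeneric looks up the shared informer for a GVR and lists its cache.
func listGeneric(factory externalversions.SharedInformerFactory) error {
	gvr := schedulingv1alpha1.SchemeGroupVersion.WithResource("podgroups")
	gi, err := factory.ForResource(gvr)
	if err != nil {
		return err
	}
	objs, err := gi.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("cache holds %d podgroups\n", len(objs))
	return nil
}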

View File

@ -0,0 +1,40 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package internalinterfaces
import (
time "time"
versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
cache "k8s.io/client-go/tools/cache"
)
// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
// SharedInformerFactory is a small interface that allows adding an informer without an import cycle
type SharedInformerFactory interface {
Start(stopCh <-chan struct{})
InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}
// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)
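
A TweakListOptionsFunc lets callers push filtering to the API server for every informer a factory builds. A sketch under assumptions (the label value is illustrative, the helper hypothetical):

package example

import (
	versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
	externalversions "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// filteredFactory restricts every informer built by the factory to objects
// carrying an assumed label, so unmatched objects never enter the cache.
func filteredFactory(client versioned.Interface) externalversions.SharedInformerFactory {
	tweak := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "app=volcano-demo" // illustrative label
	}
	return externalversions.NewSharedInformerFactoryWithOptions(
		client, 0, externalversions.WithTweakListOptions(tweak))
}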

View File

@ -0,0 +1,46 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package scheduling
import (
internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
}
type group struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}

View File

@ -0,0 +1,52 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// PodGroups returns a PodGroupInformer.
PodGroups() PodGroupInformer
// Queues returns a QueueInformer.
Queues() QueueInformer
}
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// PodGroups returns a PodGroupInformer.
func (v *version) PodGroups() PodGroupInformer {
return &podGroupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// Queues returns a QueueInformer.
func (v *version) Queues() QueueInformer {
return &queueInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}

View File

@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
time "time"
schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// PodGroupInformer provides access to a shared informer and lister for
// PodGroups.
type PodGroupInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.PodGroupLister
}
type podGroupInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewPodGroupInformer constructs a new informer for PodGroup type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewPodGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredPodGroupInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredPodGroupInformer constructs a new informer for PodGroup type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPodGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SchedulingV1alpha1().PodGroups(namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SchedulingV1alpha1().PodGroups(namespace).Watch(options)
},
},
&schedulingv1alpha1.PodGroup{},
resyncPeriod,
indexers,
)
}
func (f *podGroupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredPodGroupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *podGroupInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&schedulingv1alpha1.PodGroup{}, f.defaultInformer)
}
func (f *podGroupInformer) Lister() v1alpha1.PodGroupLister {
return v1alpha1.NewPodGroupLister(f.Informer().GetIndexer())
}

View File

@ -0,0 +1,88 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
time "time"
schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
versioned "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
internalinterfaces "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// QueueInformer provides access to a shared informer and lister for
// Queues.
type QueueInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.QueueLister
}
type queueInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// NewQueueInformer constructs a new informer for Queue type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewQueueInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredQueueInformer(client, resyncPeriod, indexers, nil)
}
// NewFilteredQueueInformer constructs a new informer for Queue type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredQueueInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SchedulingV1alpha1().Queues().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SchedulingV1alpha1().Queues().Watch(options)
},
},
&schedulingv1alpha1.Queue{},
resyncPeriod,
indexers,
)
}
func (f *queueInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredQueueInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *queueInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&schedulingv1alpha1.Queue{}, f.defaultInformer)
}
func (f *queueInformer) Lister() v1alpha1.QueueLister {
return v1alpha1.NewQueueLister(f.Informer().GetIndexer())
}

View File

@ -0,0 +1,31 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
// PodGroupListerExpansion allows custom methods to be added to
// PodGroupLister.
type PodGroupListerExpansion interface{}
// PodGroupNamespaceListerExpansion allows custom methods to be added to
// PodGroupNamespaceLister.
type PodGroupNamespaceListerExpansion interface{}
// QueueListerExpansion allows custom methods to be added to
// QueueLister.
type QueueListerExpansion interface{}

View File

@ -0,0 +1,94 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// PodGroupLister helps list PodGroups.
type PodGroupLister interface {
// List lists all PodGroups in the indexer.
List(selector labels.Selector) (ret []*v1alpha1.PodGroup, err error)
// PodGroups returns an object that can list and get PodGroups.
PodGroups(namespace string) PodGroupNamespaceLister
PodGroupListerExpansion
}
// podGroupLister implements the PodGroupLister interface.
type podGroupLister struct {
indexer cache.Indexer
}
// NewPodGroupLister returns a new PodGroupLister.
func NewPodGroupLister(indexer cache.Indexer) PodGroupLister {
return &podGroupLister{indexer: indexer}
}
// List lists all PodGroups in the indexer.
func (s *podGroupLister) List(selector labels.Selector) (ret []*v1alpha1.PodGroup, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.PodGroup))
})
return ret, err
}
// PodGroups returns an object that can list and get PodGroups.
func (s *podGroupLister) PodGroups(namespace string) PodGroupNamespaceLister {
return podGroupNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// PodGroupNamespaceLister helps list and get PodGroups.
type PodGroupNamespaceLister interface {
// List lists all PodGroups in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha1.PodGroup, err error)
// Get retrieves the PodGroup from the indexer for a given namespace and name.
Get(name string) (*v1alpha1.PodGroup, error)
PodGroupNamespaceListerExpansion
}
// podGroupNamespaceLister implements the PodGroupNamespaceLister
// interface.
type podGroupNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all PodGroups in the indexer for a given namespace.
func (s podGroupNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PodGroup, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.PodGroup))
})
return ret, err
}
// Get retrieves the PodGroup from the indexer for a given namespace and name.
func (s podGroupNamespaceLister) Get(name string) (*v1alpha1.PodGroup, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("podgroup"), name)
}
return obj.(*v1alpha1.PodGroup), nil
}
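
The lister reads from the informer's local cache, and a miss surfaces as the NotFound error built above. A hedged sketch (the helper is hypothetical) of the common pattern that treats "not yet observed" as a normal state:

package example

import (
	schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	listers "github.com/kubernetes-sigs/kube-batch/pkg/client/listers/scheduling/v1alpha1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// podGroupOrNil returns (nil, nil) on a cache miss so callers can
// distinguish absence from a real lookup failure.
func podGroupOrNil(lister listers.PodGroupLister, ns, name string) (*schedulingv1alpha1.PodGroup, error) {
	pg, err := lister.PodGroups(ns).Get(name)
	if apierrors.IsNotFound(err) {
		return nil, nil
	}
	return pg, err
}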

View File

@ -0,0 +1,65 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// QueueLister helps list Queues.
type QueueLister interface {
// List lists all Queues in the indexer.
List(selector labels.Selector) (ret []*v1alpha1.Queue, err error)
// Get retrieves the Queue from the index for a given name.
Get(name string) (*v1alpha1.Queue, error)
QueueListerExpansion
}
// queueLister implements the QueueLister interface.
type queueLister struct {
indexer cache.Indexer
}
// NewQueueLister returns a new QueueLister.
func NewQueueLister(indexer cache.Indexer) QueueLister {
return &queueLister{indexer: indexer}
}
// List lists all Queues in the indexer.
func (s *queueLister) List(selector labels.Selector) (ret []*v1alpha1.Queue, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.Queue))
})
return ret, err
}
// Get retrieves the Queue from the index for a given name.
func (s *queueLister) Get(name string) (*v1alpha1.Queue, error) {
obj, exists, err := s.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("queue"), name)
}
return obj.(*v1alpha1.Queue), nil
}

View File

@ -0,0 +1,193 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package allocate
import (
"fmt"
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util"
)
type allocateAction struct {
ssn *framework.Session
}
func New() *allocateAction {
return &allocateAction{}
}
func (alloc *allocateAction) Name() string {
return "allocate"
}
func (alloc *allocateAction) Initialize() {}
func (alloc *allocateAction) Execute(ssn *framework.Session) {
glog.V(3).Infof("Enter Allocate ...")
defer glog.V(3).Infof("Leaving Allocate ...")
queues := util.NewPriorityQueue(ssn.QueueOrderFn)
jobsMap := map[api.QueueID]*util.PriorityQueue{}
for _, job := range ssn.Jobs {
if job.PodGroup.Status.Phase == v1alpha1.PodGroupPending {
continue
}
if queue, found := ssn.Queues[job.Queue]; found {
queues.Push(queue)
} else {
glog.Warningf("Skip adding Job <%s/%s> because its queue %s is not found",
job.Namespace, job.Name, job.Queue)
continue
}
if _, found := jobsMap[job.Queue]; !found {
jobsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn)
}
glog.V(4).Infof("Added Job <%s/%s> into Queue <%s>", job.Namespace, job.Name, job.Queue)
jobsMap[job.Queue].Push(job)
}
glog.V(3).Infof("Try to allocate resource to %d Queues", len(jobsMap))
pendingTasks := map[api.JobID]*util.PriorityQueue{}
allNodes := util.GetNodeList(ssn.Nodes)
predicateFn := func(task *api.TaskInfo, node *api.NodeInfo) error {
// Check for Resource Predicate
// TODO: We cannot allocate resources to a task from both node.Idle and node.Releasing yet;
// once that is supported, the following compare could become:
// clonedNode := node.Idle.Clone()
// if !task.InitResreq.LessEqual(clonedNode.Add(node.Releasing)) {
// ...
// }
if !task.InitResreq.LessEqual(node.Idle) && !task.InitResreq.LessEqual(node.Releasing) {
return fmt.Errorf("task <%s/%s> ResourceFit failed on node <%s>",
task.Namespace, task.Name, node.Name)
}
return ssn.PredicateFn(task, node)
}
for {
if queues.Empty() {
break
}
queue := queues.Pop().(*api.QueueInfo)
if ssn.Overused(queue) {
glog.V(3).Infof("Queue <%s> is overused, ignore it.", queue.Name)
continue
}
jobs, found := jobsMap[queue.UID]
glog.V(3).Infof("Try to allocate resource to Jobs in Queue <%v>", queue.Name)
if !found || jobs.Empty() {
glog.V(4).Infof("Can not find jobs for queue %s.", queue.Name)
continue
}
job := jobs.Pop().(*api.JobInfo)
if _, found := pendingTasks[job.UID]; !found {
tasks := util.NewPriorityQueue(ssn.TaskOrderFn)
for _, task := range job.TaskStatusIndex[api.Pending] {
// Skip BestEffort task in 'allocate' action.
if task.Resreq.IsEmpty() {
glog.V(4).Infof("Task <%v/%v> is BestEffort task, skip it.",
task.Namespace, task.Name)
continue
}
tasks.Push(task)
}
pendingTasks[job.UID] = tasks
}
tasks := pendingTasks[job.UID]
glog.V(3).Infof("Try to allocate resource to %d tasks of Job <%v/%v>",
tasks.Len(), job.Namespace, job.Name)
for !tasks.Empty() {
task := tasks.Pop().(*api.TaskInfo)
glog.V(3).Infof("There are <%d> nodes for Job <%v/%v>",
len(ssn.Nodes), job.Namespace, job.Name)
// Any task that does not fit will be the last one processed within
// this loop, so any existing contents of NodesFitDelta belong to
// tasks that eventually did fit on a node.
if len(job.NodesFitDelta) > 0 {
job.NodesFitDelta = make(api.NodeResourceMap)
}
predicateNodes := util.PredicateNodes(task, allNodes, predicateFn)
if len(predicateNodes) == 0 {
break
}
nodeScores := util.PrioritizeNodes(task, predicateNodes, ssn.NodeOrderFn)
node := util.SelectBestNode(nodeScores)
// Allocate idle resource to the task.
if task.InitResreq.LessEqual(node.Idle) {
glog.V(3).Infof("Binding Task <%v/%v> to node <%v>",
task.Namespace, task.Name, node.Name)
if err := ssn.Allocate(task, node.Name); err != nil {
glog.Errorf("Failed to bind Task %v on %v in Session %v, err: %v",
task.UID, node.Name, ssn.UID, err)
}
} else {
//store information about missing resources
job.NodesFitDelta[node.Name] = node.Idle.Clone()
job.NodesFitDelta[node.Name].FitDelta(task.InitResreq)
glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s> with limited resources",
task.Namespace, task.Name, node.Name)
// Allocate releasing resource to the task if any.
if task.InitResreq.LessEqual(node.Releasing) {
glog.V(3).Infof("Pipelining Task <%v/%v> to node <%v> for <%v> on <%v>",
task.Namespace, task.Name, node.Name, task.InitResreq, node.Releasing)
if err := ssn.Pipeline(task, node.Name); err != nil {
glog.Errorf("Failed to pipeline Task %v on %v in Session %v",
task.UID, node.Name, ssn.UID)
}
}
}
if ssn.JobReady(job) {
jobs.Push(job)
break
}
}
// Add the Queue back until it has no jobs left.
queues.Push(queue)
}
}
func (alloc *allocateAction) UnInitialize() {}

View File

@ -0,0 +1,78 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backfill
import (
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
)
type backfillAction struct {
ssn *framework.Session
}
func New() *backfillAction {
return &backfillAction{}
}
func (alloc *backfillAction) Name() string {
return "backfill"
}
func (alloc *backfillAction) Initialize() {}
func (alloc *backfillAction) Execute(ssn *framework.Session) {
glog.V(3).Infof("Enter Backfill ...")
defer glog.V(3).Infof("Leaving Backfill ...")
// TODO (k82cn): When backfilling, we also need to balance between Queues.
for _, job := range ssn.Jobs {
if job.PodGroup.Status.Phase == v1alpha1.PodGroupPending {
continue
}
for _, task := range job.TaskStatusIndex[api.Pending] {
if task.InitResreq.IsEmpty() {
// As the task did not request resources, it only needs to satisfy the predicates.
// TODO (k82cn): need to prioritize nodes to avoid pod hole.
for _, node := range ssn.Nodes {
// TODO (k82cn): predicates do not consider pod count for now; there may
// be a ping-pong case here.
if err := ssn.PredicateFn(task, node); err != nil {
glog.V(3).Infof("Predicates failed for task <%s/%s> on node <%s>: %v",
task.Namespace, task.Name, node.Name, err)
continue
}
glog.V(3).Infof("Binding Task <%v/%v> to node <%v>", task.Namespace, task.Name, node.Name)
if err := ssn.Allocate(task, node.Name); err != nil {
glog.Errorf("Failed to bind Task %v on %v in Session %v", task.UID, node.Name, ssn.UID)
continue
}
break
}
} else {
// TODO (k82cn): backfill for other case.
}
}
}
}
func (alloc *backfillAction) UnInitialize() {}

View File

@ -0,0 +1,128 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package enqueue
import (
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util"
)
type enqueueAction struct {
ssn *framework.Session
}
func New() *enqueueAction {
return &enqueueAction{}
}
func (enqueue *enqueueAction) Name() string {
return "enqueue"
}
func (enqueue *enqueueAction) Initialize() {}
func (enqueue *enqueueAction) Execute(ssn *framework.Session) {
glog.V(3).Infof("Enter Enqueue ...")
defer glog.V(3).Infof("Leaving Enqueue ...")
queues := util.NewPriorityQueue(ssn.QueueOrderFn)
queueMap := map[api.QueueID]*api.QueueInfo{}
jobsMap := map[api.QueueID]*util.PriorityQueue{}
for _, job := range ssn.Jobs {
if queue, found := ssn.Queues[job.Queue]; !found {
glog.Errorf("Failed to find Queue <%s> for Job <%s/%s>",
job.Queue, job.Namespace, job.Name)
continue
} else {
if _, existed := queueMap[queue.UID]; !existed {
glog.V(3).Infof("Added Queue <%s> for Job <%s/%s>",
queue.Name, job.Namespace, job.Name)
queueMap[queue.UID] = queue
queues.Push(queue)
}
}
if job.PodGroup.Status.Phase == v1alpha1.PodGroupPending {
if _, found := jobsMap[job.Queue]; !found {
jobsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn)
}
glog.V(3).Infof("Added Job <%s/%s> into Queue <%s>", job.Namespace, job.Name, job.Queue)
jobsMap[job.Queue].Push(job)
}
}
glog.V(3).Infof("Try to enqueue PodGroup to %d Queues", len(jobsMap))
emptyRes := api.EmptyResource()
nodesIdleRes := api.EmptyResource()
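// Estimate per-node idle headroom as allocatable*1.2 - used; the 1.2
// overcommit factor allows enqueueing slightly ahead of free capacity.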
for _, node := range ssn.Nodes {
nodesIdleRes.Add(node.Allocatable.Clone().Multi(1.2).Sub(node.Used))
}
for {
if queues.Empty() {
break
}
if nodesIdleRes.Less(emptyRes) {
glog.V(3).Infof("Node idle resource <%s> is overused, ignore it.")
break
}
queue := queues.Pop().(*api.QueueInfo)
// Found "high" priority job
jobs, found := jobsMap[queue.UID]
if !found || jobs.Empty() {
continue
}
job := jobs.Pop().(*api.JobInfo)
inqueue := false
if len(job.TaskStatusIndex[api.Pending]) != 0 {
inqueue = true
} else {
if job.PodGroup.Spec.MinResources == nil {
inqueue = true
} else {
pgResource := api.NewResource(*job.PodGroup.Spec.MinResources)
if pgResource.LessEqual(nodesIdleRes) {
nodesIdleRes.Sub(pgResource)
inqueue = true
}
}
}
if inqueue {
job.PodGroup.Status.Phase = v1alpha1.PodGroupInqueue
ssn.Jobs[job.UID] = job
}
// Add the Queue back until it has no jobs left.
queues.Push(queue)
}
}
func (enqueue *enqueueAction) UnInitialize() {}

View File

@ -0,0 +1,35 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package actions
import (
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/allocate"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/backfill"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/enqueue"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/preempt"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/actions/reclaim"
)
func init() {
framework.RegisterAction(reclaim.New())
framework.RegisterAction(allocate.New())
framework.RegisterAction(backfill.New())
framework.RegisterAction(preempt.New())
framework.RegisterAction(enqueue.New())
}
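
The init() above shows the plugin pattern for actions. A hedged sketch of how a new action would slot in, assuming the framework.Action contract visible in the surrounding files (Name, Initialize, Execute, UnInitialize); the "noop" action and package are hypothetical:

package noop

import (
	"github.com/golang/glog"

	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
)

// noopAction only logs each session; a real action would walk ssn.Jobs and
// drive decisions through calls such as ssn.Allocate or ssn.Evict.
type noopAction struct{}

func New() *noopAction { return &noopAction{} }

func (n *noopAction) Name() string { return "noop" }

func (n *noopAction) Initialize() {}

func (n *noopAction) Execute(ssn *framework.Session) {
	glog.V(3).Infof("noop action observed %d jobs", len(ssn.Jobs))
}

func (n *noopAction) UnInitialize() {}

Registering it would mirror the init() above: framework.RegisterAction(noop.New()).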

View File

@ -0,0 +1,273 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package preempt
import (
"fmt"
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util"
)
type preemptAction struct {
ssn *framework.Session
}
func New() *preemptAction {
return &preemptAction{}
}
func (alloc *preemptAction) Name() string {
return "preempt"
}
func (alloc *preemptAction) Initialize() {}
func (alloc *preemptAction) Execute(ssn *framework.Session) {
glog.V(3).Infof("Enter Preempt ...")
defer glog.V(3).Infof("Leaving Preempt ...")
preemptorsMap := map[api.QueueID]*util.PriorityQueue{}
preemptorTasks := map[api.JobID]*util.PriorityQueue{}
var underRequest []*api.JobInfo
queues := map[api.QueueID]*api.QueueInfo{}
for _, job := range ssn.Jobs {
if job.PodGroup.Status.Phase == v1alpha1.PodGroupPending {
continue
}
if queue, found := ssn.Queues[job.Queue]; !found {
continue
} else if _, existed := queues[queue.UID]; !existed {
glog.V(3).Infof("Added Queue <%s> for Job <%s/%s>",
queue.Name, job.Namespace, job.Name)
queues[queue.UID] = queue
}
if len(job.TaskStatusIndex[api.Pending]) != 0 {
if _, found := preemptorsMap[job.Queue]; !found {
preemptorsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn)
}
preemptorsMap[job.Queue].Push(job)
underRequest = append(underRequest, job)
preemptorTasks[job.UID] = util.NewPriorityQueue(ssn.TaskOrderFn)
for _, task := range job.TaskStatusIndex[api.Pending] {
preemptorTasks[job.UID].Push(task)
}
}
}
// Preemption between Jobs within Queue.
for _, queue := range queues {
for {
preemptors := preemptorsMap[queue.UID]
// If no preemptors, no preemption.
if preemptors == nil || preemptors.Empty() {
glog.V(4).Infof("No preemptors in Queue <%s>, break.", queue.Name)
break
}
preemptorJob := preemptors.Pop().(*api.JobInfo)
stmt := ssn.Statement()
assigned := false
for {
// If there are no preemptor tasks, move to the next job.
if preemptorTasks[preemptorJob.UID].Empty() {
glog.V(3).Infof("No preemptor task in job <%s/%s>.",
preemptorJob.Namespace, preemptorJob.Name)
break
}
preemptor := preemptorTasks[preemptorJob.UID].Pop().(*api.TaskInfo)
if preempted, _ := preempt(ssn, stmt, preemptor, ssn.Nodes, func(task *api.TaskInfo) bool {
// Ignore non-running tasks.
if task.Status != api.Running {
return false
}
job, found := ssn.Jobs[task.Job]
if !found {
return false
}
// Preempt other jobs within queue
return job.Queue == preemptorJob.Queue && preemptor.Job != task.Job
}); preempted {
assigned = true
}
// If the job is pipelined, commit and stop; otherwise keep preempting.
if ssn.JobPipelined(preemptorJob) {
stmt.Commit()
break
}
}
// If the job is still not pipelined after trying all tasks, discard and move to the next job.
if !ssn.JobPipelined(preemptorJob) {
stmt.Discard()
continue
}
if assigned {
preemptors.Push(preemptorJob)
}
}
// Preemption between Tasks within a Job.
for _, job := range underRequest {
for {
if _, found := preemptorTasks[job.UID]; !found {
break
}
if preemptorTasks[job.UID].Empty() {
break
}
preemptor := preemptorTasks[job.UID].Pop().(*api.TaskInfo)
stmt := ssn.Statement()
assigned, _ := preempt(ssn, stmt, preemptor, ssn.Nodes, func(task *api.TaskInfo) bool {
// Ignore non-running tasks.
if task.Status != api.Running {
return false
}
// Preempt tasks within job.
return preemptor.Job == task.Job
})
stmt.Commit()
// If no preemption, next job.
if !assigned {
break
}
}
}
}
}
func (alloc *preemptAction) UnInitialize() {}
func preempt(
ssn *framework.Session,
stmt *framework.Statement,
preemptor *api.TaskInfo,
nodes map[string]*api.NodeInfo,
filter func(*api.TaskInfo) bool,
) (bool, error) {
assigned := false
allNodes := util.GetNodeList(nodes)
predicateNodes := util.PredicateNodes(preemptor, allNodes, ssn.PredicateFn)
nodeScores := util.PrioritizeNodes(preemptor, predicateNodes, ssn.NodeOrderFn)
selectedNodes := util.SortNodes(nodeScores)
for _, node := range selectedNodes {
glog.V(3).Infof("Considering Task <%s/%s> on Node <%s>.",
preemptor.Namespace, preemptor.Name, node.Name)
var preemptees []*api.TaskInfo
preempted := api.EmptyResource()
resreq := preemptor.InitResreq.Clone()
for _, task := range node.Tasks {
if filter == nil {
preemptees = append(preemptees, task.Clone())
} else if filter(task) {
preemptees = append(preemptees, task.Clone())
}
}
victims := ssn.Preemptable(preemptor, preemptees)
metrics.UpdatePreemptionVictimsCount(len(victims))
if err := validateVictims(victims, resreq); err != nil {
glog.V(3).Infof("No validated victims on Node <%s>: %v", node.Name, err)
continue
}
victimsQueue := util.NewPriorityQueue(func(l, r interface{}) bool {
return !ssn.TaskOrderFn(l, r)
})
for _, victim := range victims {
victimsQueue.Push(victim)
}
// Preempt victims for the task, picking the lowest-priority victim first.
for !victimsQueue.Empty() {
preemptee := victimsQueue.Pop().(*api.TaskInfo)
glog.Errorf("Try to preempt Task <%s/%s> for Tasks <%s/%s>",
preemptee.Namespace, preemptee.Name, preemptor.Namespace, preemptor.Name)
if err := stmt.Evict(preemptee, "preempt"); err != nil {
glog.Errorf("Failed to preempt Task <%s/%s> for Tasks <%s/%s>: %v",
preemptee.Namespace, preemptee.Name, preemptor.Namespace, preemptor.Name, err)
continue
}
preempted.Add(preemptee.Resreq)
// If enough resources have been reclaimed, break the loop to avoid a Sub panic.
if resreq.LessEqual(preempted) {
break
}
}
metrics.RegisterPreemptionAttempts()
glog.V(3).Infof("Preempted <%v> for task <%s/%s> requested <%v>.",
preempted, preemptor.Namespace, preemptor.Name, preemptor.InitResreq)
if preemptor.InitResreq.LessEqual(preempted) {
if err := stmt.Pipeline(preemptor, node.Name); err != nil {
glog.Errorf("Failed to pipline Task <%s/%s> on Node <%s>",
preemptor.Namespace, preemptor.Name, node.Name)
}
// Ignore the pipeline error; it will be corrected in the next scheduling loop.
assigned = true
break
}
}
return assigned, nil
}
func validateVictims(victims []*api.TaskInfo, resreq *api.Resource) error {
if len(victims) == 0 {
return fmt.Errorf("no victims")
}
// If the victims do not free enough resources, report an error.
allRes := api.EmptyResource()
for _, v := range victims {
allRes.Add(v.Resreq)
}
if allRes.Less(resreq) {
return fmt.Errorf("not enough resources")
}
return nil
}
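
A hedged, in-package sketch of the invariant validateVictims enforces; it assumes api.Resource exposes MilliCPU and Memory fields (not shown in this diff), and the values are illustrative:

// exampleValidateVictims is hypothetical: two 500-milli-CPU/1Gi victims
// cover a 1-CPU/2Gi request, so validateVictims returns nil.
func exampleValidateVictims() error {
	victims := []*api.TaskInfo{
		{Resreq: &api.Resource{MilliCPU: 500, Memory: 1 << 30}},
		{Resreq: &api.Resource{MilliCPU: 500, Memory: 1 << 30}},
	}
	resreq := &api.Resource{MilliCPU: 1000, Memory: 2 << 30}
	// nil here: the summed victim Resreq is not Less than the request.
	return validateVictims(victims, resreq)
}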

View File

@ -0,0 +1,201 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reclaim
import (
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/util"
)
type reclaimAction struct {
ssn *framework.Session
}
func New() *reclaimAction {
return &reclaimAction{}
}
func (alloc *reclaimAction) Name() string {
return "reclaim"
}
func (alloc *reclaimAction) Initialize() {}
func (alloc *reclaimAction) Execute(ssn *framework.Session) {
glog.V(3).Infof("Enter Reclaim ...")
defer glog.V(3).Infof("Leaving Reclaim ...")
queues := util.NewPriorityQueue(ssn.QueueOrderFn)
queueMap := map[api.QueueID]*api.QueueInfo{}
preemptorsMap := map[api.QueueID]*util.PriorityQueue{}
preemptorTasks := map[api.JobID]*util.PriorityQueue{}
glog.V(3).Infof("There are <%d> Jobs and <%d> Queues in total for scheduling.",
len(ssn.Jobs), len(ssn.Queues))
var underRequest []*api.JobInfo
for _, job := range ssn.Jobs {
if job.PodGroup.Status.Phase == v1alpha1.PodGroupPending {
continue
}
if queue, found := ssn.Queues[job.Queue]; !found {
glog.Errorf("Failed to find Queue <%s> for Job <%s/%s>",
job.Queue, job.Namespace, job.Name)
continue
} else {
if _, existed := queueMap[queue.UID]; !existed {
glog.V(4).Infof("Added Queue <%s> for Job <%s/%s>",
queue.Name, job.Namespace, job.Name)
queueMap[queue.UID] = queue
queues.Push(queue)
}
}
if len(job.TaskStatusIndex[api.Pending]) != 0 {
if _, found := preemptorsMap[job.Queue]; !found {
preemptorsMap[job.Queue] = util.NewPriorityQueue(ssn.JobOrderFn)
}
preemptorsMap[job.Queue].Push(job)
underRequest = append(underRequest, job)
preemptorTasks[job.UID] = util.NewPriorityQueue(ssn.TaskOrderFn)
for _, task := range job.TaskStatusIndex[api.Pending] {
preemptorTasks[job.UID].Push(task)
}
}
}
for {
// If no queues, break
if queues.Empty() {
break
}
var job *api.JobInfo
var task *api.TaskInfo
queue := queues.Pop().(*api.QueueInfo)
if ssn.Overused(queue) {
glog.V(3).Infof("Queue <%s> is overused, ignore it.", queue.Name)
continue
}
// Found "high" priority job
if jobs, found := preemptorsMap[queue.UID]; !found || jobs.Empty() {
continue
} else {
job = jobs.Pop().(*api.JobInfo)
}
// Found "high" priority task to reclaim others
if tasks, found := preemptorTasks[job.UID]; !found || tasks.Empty() {
continue
} else {
task = tasks.Pop().(*api.TaskInfo)
}
assigned := false
for _, n := range ssn.Nodes {
// If the predicate fails, try the next node.
if err := ssn.PredicateFn(task, n); err != nil {
continue
}
resreq := task.InitResreq.Clone()
reclaimed := api.EmptyResource()
glog.V(3).Infof("Considering Task <%s/%s> on Node <%s>.",
task.Namespace, task.Name, n.Name)
var reclaimees []*api.TaskInfo
for _, task := range n.Tasks {
// Ignore non-running tasks.
if task.Status != api.Running {
continue
}
if j, found := ssn.Jobs[task.Job]; !found {
continue
} else if j.Queue != job.Queue {
// Clone the task to avoid modifying its status on the node.
reclaimees = append(reclaimees, task.Clone())
}
}
victims := ssn.Reclaimable(task, reclaimees)
if len(victims) == 0 {
glog.V(3).Infof("No victims on Node <%s>.", n.Name)
continue
}
// If the victims cannot free enough resources, try the next node.
allRes := api.EmptyResource()
for _, v := range victims {
allRes.Add(v.Resreq)
}
if allRes.Less(resreq) {
glog.V(3).Infof("Not enough resource from victims on Node <%s>.", n.Name)
continue
}
// Reclaim victims for the task.
for _, reclaimee := range victims {
glog.Errorf("Try to reclaim Task <%s/%s> for Tasks <%s/%s>",
reclaimee.Namespace, reclaimee.Name, task.Namespace, task.Name)
if err := ssn.Evict(reclaimee, "reclaim"); err != nil {
glog.Errorf("Failed to reclaim Task <%s/%s> for Tasks <%s/%s>: %v",
reclaimee.Namespace, reclaimee.Name, task.Namespace, task.Name, err)
continue
}
reclaimed.Add(reclaimee.Resreq)
// If reclaimed enough resources, break loop to avoid Sub panic.
if resreq.LessEqual(reclaimed) {
break
}
}
glog.V(3).Infof("Reclaimed <%v> for task <%s/%s> requested <%v>.",
reclaimed, task.Namespace, task.Name, task.InitResreq)
if task.InitResreq.LessEqual(reclaimed) {
if err := ssn.Pipeline(task, n.Name); err != nil {
glog.Errorf("Failed to pipeline Task <%s/%s> on Node <%s>",
task.Namespace, task.Name, n.Name)
}
// Ignore the pipeline error; it will be corrected in the next scheduling loop.
assigned = true
break
}
}
if assigned {
queues.Push(queue)
}
}
}
func (ra *reclaimAction) UnInitialize() {
}
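// Illustrative sketch, not from the original commit: the shape of the callback
// a plugin can register so that ssn.Reclaimable above returns victims. The
// AddReclaimableFn name and signature are an assumption based on the imported
// framework and api packages (see api.EvictableFn).
func registerExampleReclaimable(ssn *framework.Session) {
ssn.AddReclaimableFn("example", func(reclaimer *api.TaskInfo, reclaimees []*api.TaskInfo) []*api.TaskInfo {
// Trivial policy: every candidate handed in by the action (already
// filtered to running tasks from other queues) may be evicted.
return reclaimees
})
}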

View File

@ -0,0 +1,61 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import "fmt"
// ClusterInfo is a snapshot of cluster by cache.
type ClusterInfo struct {
Jobs map[JobID]*JobInfo
Nodes map[string]*NodeInfo
Queues map[QueueID]*QueueInfo
}
func (ci ClusterInfo) String() string {
str := "Cache:\n"
if len(ci.Nodes) != 0 {
str = str + "Nodes:\n"
for _, n := range ci.Nodes {
str = str + fmt.Sprintf("\t %s: idle(%v) used(%v) allocatable(%v) pods(%d)\n",
n.Name, n.Idle, n.Used, n.Allocatable, len(n.Tasks))
i := 0
for _, p := range n.Tasks {
str = str + fmt.Sprintf("\t\t %d: %v\n", i, p)
i++
}
}
}
if len(ci.Jobs) != 0 {
str = str + "Jobs:\n"
for _, job := range ci.Jobs {
str = str + fmt.Sprintf("\t Job(%s) name(%s) minAvailable(%v)\n",
job.UID, job.Name, job.MinAvailable)
i := 0
for _, task := range job.Tasks {
str = str + fmt.Sprintf("\t\t %d: %v\n", i, task)
i++
}
}
}
return str
}

View File

@ -0,0 +1,106 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
v1 "k8s.io/api/core/v1"
clientcache "k8s.io/client-go/tools/cache"
)
// PodKey returns the string key of a pod.
func PodKey(pod *v1.Pod) TaskID {
key, err := clientcache.MetaNamespaceKeyFunc(pod)
if err != nil {
return TaskID(fmt.Sprintf("%v/%v", pod.Namespace, pod.Name))
}
return TaskID(key)
}
func getTaskStatus(pod *v1.Pod) TaskStatus {
switch pod.Status.Phase {
case v1.PodRunning:
if pod.DeletionTimestamp != nil {
return Releasing
}
return Running
case v1.PodPending:
if pod.DeletionTimestamp != nil {
return Releasing
}
if len(pod.Spec.NodeName) == 0 {
return Pending
}
return Bound
case v1.PodUnknown:
return Unknown
case v1.PodSucceeded:
return Succeeded
case v1.PodFailed:
return Failed
}
return Unknown
}
// AllocatedStatus checks whether the given task status is an allocated status.
func AllocatedStatus(status TaskStatus) bool {
switch status {
case Bound, Binding, Running, Allocated:
return true
default:
return false
}
}
// MergeErrors is used to merge multiple errors into a single error.
func MergeErrors(errs ...error) error {
msg := "errors: "
foundErr := false
i := 1
for _, e := range errs {
if e != nil {
if foundErr {
msg = fmt.Sprintf("%s, %d: ", msg, i)
} else {
msg = fmt.Sprintf("%s %d: ", msg, i)
}
msg = fmt.Sprintf("%s%v", msg, e)
foundErr = true
i++
}
}
if foundErr {
return fmt.Errorf("%s", msg)
}
return nil
}
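// Illustrative sketch, not from the original commit: MergeErrors in use. Nil
// slots are skipped, so callers can pass results through unconditionally;
// err1 and err2 are placeholders for failures collected from separate calls.
func exampleMergeErrors(err1, err2 error) error {
return MergeErrors(err1, nil, err2)
}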
// JobTerminated checks whether job was terminated.
func JobTerminated(job *JobInfo) bool {
return job.PodGroup == nil &&
job.PDB == nil &&
len(job.Tasks) == 0
}

View File

@ -0,0 +1,60 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helpers
import (
"math"
v1 "k8s.io/api/core/v1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
// Min returns the component-wise minimum of two resources.
func Min(l, r *api.Resource) *api.Resource {
res := &api.Resource{}
res.MilliCPU = math.Min(l.MilliCPU, r.MilliCPU)
res.Memory = math.Min(l.Memory, r.Memory)
if l.ScalarResources == nil || r.ScalarResources == nil {
return res
}
res.ScalarResources = map[v1.ResourceName]float64{}
for lName, lQuant := range l.ScalarResources {
res.ScalarResources[lName] = math.Min(lQuant, r.ScalarResources[lName])
}
return res
}
// Share computes l/r, defining 0/0 as 0 and l/0 as 1 for l > 0.
func Share(l, r float64) float64 {
var share float64
if r == 0 {
if l == 0 {
share = 0
} else {
share = 1
}
} else {
share = l / r
}
return share
}
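// Illustrative sketch, not from the original commit: how Share behaves at the
// edges, which is what makes it safe for DRF-style dominant-share math.
func exampleShares() []float64 {
return []float64{
Share(0, 0), // 0: nothing requested against zero capacity
Share(2, 0), // 1: demand against a zero-capacity dimension saturates
Share(2, 8), // 0.25: the usual l/r ratio
}
}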

View File

@ -0,0 +1,426 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
"sort"
"strings"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
)
// TaskID is a UID type for tasks.
type TaskID types.UID
// TaskInfo holds all information about a task.
type TaskInfo struct {
UID TaskID
Job JobID
Name string
Namespace string
// Resreq is the resource used while the task is running.
Resreq *Resource
// InitResreq is the resource required to launch the task.
InitResreq *Resource
NodeName string
Status TaskStatus
Priority int32
VolumeReady bool
Pod *v1.Pod
}
func getJobID(pod *v1.Pod) JobID {
if len(pod.Annotations) != 0 {
if gn, found := pod.Annotations[v1alpha1.GroupNameAnnotationKey]; found && len(gn) != 0 {
// Make sure Pod and PodGroup belong to the same namespace.
jobID := fmt.Sprintf("%s/%s", pod.Namespace, gn)
return JobID(jobID)
}
}
return ""
}
// NewTaskInfo creates a new taskInfo object for a Pod.
func NewTaskInfo(pod *v1.Pod) *TaskInfo {
req := GetPodResourceWithoutInitContainers(pod)
initResreq := GetPodResourceRequest(pod)
jobID := getJobID(pod)
ti := &TaskInfo{
UID: TaskID(pod.UID),
Job: jobID,
Name: pod.Name,
Namespace: pod.Namespace,
NodeName: pod.Spec.NodeName,
Status: getTaskStatus(pod),
Priority: 1,
Pod: pod,
Resreq: req,
InitResreq: initResreq,
}
if pod.Spec.Priority != nil {
ti.Priority = *pod.Spec.Priority
}
return ti
}
// Clone is used for cloning a task
func (ti *TaskInfo) Clone() *TaskInfo {
return &TaskInfo{
UID: ti.UID,
Job: ti.Job,
Name: ti.Name,
Namespace: ti.Namespace,
NodeName: ti.NodeName,
Status: ti.Status,
Priority: ti.Priority,
Pod: ti.Pod,
Resreq: ti.Resreq.Clone(),
InitResreq: ti.InitResreq.Clone(),
VolumeReady: ti.VolumeReady,
}
}
// String returns the taskInfo details in a string
func (ti TaskInfo) String() string {
return fmt.Sprintf("Task (%v:%v/%v): job %v, status %v, pri %v, resreq %v",
ti.UID, ti.Namespace, ti.Name, ti.Job, ti.Status, ti.Priority, ti.Resreq)
}
// JobID is the type of JobInfo's ID.
type JobID types.UID
type tasksMap map[TaskID]*TaskInfo
// NodeResourceMap stores resource in a node
type NodeResourceMap map[string]*Resource
// JobInfo holds all information about a Job.
type JobInfo struct {
UID JobID
Name string
Namespace string
Queue QueueID
Priority int32
NodeSelector map[string]string
MinAvailable int32
NodesFitDelta NodeResourceMap
// All tasks of the Job.
TaskStatusIndex map[TaskStatus]tasksMap
Tasks tasksMap
Allocated *Resource
TotalRequest *Resource
CreationTimestamp metav1.Time
PodGroup *v1alpha1.PodGroup
// TODO(k82cn): keep backward compatibility, removed it when v1alpha1 finalized.
PDB *policyv1.PodDisruptionBudget
}
// NewJobInfo creates a new jobInfo for a set of tasks.
func NewJobInfo(uid JobID, tasks ...*TaskInfo) *JobInfo {
job := &JobInfo{
UID: uid,
MinAvailable: 0,
NodeSelector: make(map[string]string),
NodesFitDelta: make(NodeResourceMap),
Allocated: EmptyResource(),
TotalRequest: EmptyResource(),
TaskStatusIndex: map[TaskStatus]tasksMap{},
Tasks: tasksMap{},
}
for _, task := range tasks {
job.AddTaskInfo(task)
}
return job
}
// UnsetPodGroup removes podGroup details from a job
func (ji *JobInfo) UnsetPodGroup() {
ji.PodGroup = nil
}
// SetPodGroup sets podGroup details to a job
func (ji *JobInfo) SetPodGroup(pg *v1alpha1.PodGroup) {
ji.Name = pg.Name
ji.Namespace = pg.Namespace
ji.MinAvailable = pg.Spec.MinMember
ji.Queue = QueueID(pg.Spec.Queue)
ji.CreationTimestamp = pg.GetCreationTimestamp()
ji.PodGroup = pg
}
// SetPDB sets PDB to a job
func (ji *JobInfo) SetPDB(pdb *policyv1.PodDisruptionBudget) {
ji.Name = pdb.Name
ji.MinAvailable = pdb.Spec.MinAvailable.IntVal
ji.Namespace = pdb.Namespace
ji.CreationTimestamp = pdb.GetCreationTimestamp()
ji.PDB = pdb
}
// UnsetPDB removes PDB info of a job
func (ji *JobInfo) UnsetPDB() {
ji.PDB = nil
}
// GetTasks gets all tasks with the given statuses.
func (ji *JobInfo) GetTasks(statuses ...TaskStatus) []*TaskInfo {
var res []*TaskInfo
for _, status := range statuses {
if tasks, found := ji.TaskStatusIndex[status]; found {
for _, task := range tasks {
res = append(res, task.Clone())
}
}
}
return res
}
func (ji *JobInfo) addTaskIndex(ti *TaskInfo) {
if _, found := ji.TaskStatusIndex[ti.Status]; !found {
ji.TaskStatusIndex[ti.Status] = tasksMap{}
}
ji.TaskStatusIndex[ti.Status][ti.UID] = ti
}
// AddTaskInfo is used to add a task to a job
func (ji *JobInfo) AddTaskInfo(ti *TaskInfo) {
ji.Tasks[ti.UID] = ti
ji.addTaskIndex(ti)
ji.TotalRequest.Add(ti.Resreq)
if AllocatedStatus(ti.Status) {
ji.Allocated.Add(ti.Resreq)
}
}
// UpdateTaskStatus is used to update task's status in a job
func (ji *JobInfo) UpdateTaskStatus(task *TaskInfo, status TaskStatus) error {
if err := validateStatusUpdate(task.Status, status); err != nil {
return err
}
// Remove the task from the task list first.
ji.DeleteTaskInfo(task)
// Update task's status to the target status
task.Status = status
ji.AddTaskInfo(task)
return nil
}
func (ji *JobInfo) deleteTaskIndex(ti *TaskInfo) {
if tasks, found := ji.TaskStatusIndex[ti.Status]; found {
delete(tasks, ti.UID)
if len(tasks) == 0 {
delete(ji.TaskStatusIndex, ti.Status)
}
}
}
// DeleteTaskInfo is used to delete a task from a job
func (ji *JobInfo) DeleteTaskInfo(ti *TaskInfo) error {
if task, found := ji.Tasks[ti.UID]; found {
ji.TotalRequest.Sub(task.Resreq)
if AllocatedStatus(task.Status) {
ji.Allocated.Sub(task.Resreq)
}
delete(ji.Tasks, task.UID)
ji.deleteTaskIndex(task)
return nil
}
return fmt.Errorf("failed to find task <%v/%v> in job <%v/%v>",
ti.Namespace, ti.Name, ji.Namespace, ji.Name)
}
// Clone is used to clone a jobInfo object
func (ji *JobInfo) Clone() *JobInfo {
info := &JobInfo{
UID: ji.UID,
Name: ji.Name,
Namespace: ji.Namespace,
Queue: ji.Queue,
Priority: ji.Priority,
MinAvailable: ji.MinAvailable,
NodeSelector: map[string]string{},
Allocated: EmptyResource(),
TotalRequest: EmptyResource(),
NodesFitDelta: make(NodeResourceMap),
PDB: ji.PDB,
PodGroup: ji.PodGroup,
TaskStatusIndex: map[TaskStatus]tasksMap{},
Tasks: tasksMap{},
}
ji.CreationTimestamp.DeepCopyInto(&info.CreationTimestamp)
for k, v := range ji.NodeSelector {
info.NodeSelector[k] = v
}
for _, task := range ji.Tasks {
info.AddTaskInfo(task.Clone())
}
return info
}
// String returns a jobInfo object in string format
func (ji JobInfo) String() string {
res := ""
i := 0
for _, task := range ji.Tasks {
res = res + fmt.Sprintf("\n\t %d: %v", i, task)
i++
}
return fmt.Sprintf("Job (%v): namespace %v (%v), name %v, minAvailable %d, podGroup %+v",
ji.UID, ji.Namespace, ji.Queue, ji.Name, ji.MinAvailable, ji.PodGroup) + res
}
// FitError returns detailed information on why a job's task failed to fit on
// each available node
func (ji *JobInfo) FitError() string {
if len(ji.NodesFitDelta) == 0 {
reasonMsg := fmt.Sprintf("0 nodes are available")
return reasonMsg
}
reasons := make(map[string]int)
for _, v := range ji.NodesFitDelta {
if v.Get(v1.ResourceCPU) < 0 {
reasons["cpu"]++
}
if v.Get(v1.ResourceMemory) < 0 {
reasons["memory"]++
}
for rName, rQuant := range v.ScalarResources {
if rQuant < 0 {
reasons[string(rName)]++
}
}
}
sortReasonsHistogram := func() []string {
reasonStrings := []string{}
for k, v := range reasons {
reasonStrings = append(reasonStrings, fmt.Sprintf("%v insufficient %v", v, k))
}
sort.Strings(reasonStrings)
return reasonStrings
}
reasonMsg := fmt.Sprintf("0/%v nodes are available, %v.", len(ji.NodesFitDelta), strings.Join(sortReasonsHistogram(), ", "))
return reasonMsg
}
// ReadyTaskNum returns the number of tasks that are ready.
func (ji *JobInfo) ReadyTaskNum() int32 {
occupied := 0
for status, tasks := range ji.TaskStatusIndex {
if AllocatedStatus(status) ||
status == Succeeded {
occupied = occupied + len(tasks)
}
}
return int32(occupied)
}
// WaitingTaskNum returns the number of tasks that are pipelined.
func (ji *JobInfo) WaitingTaskNum() int32 {
occupied := 0
for status, tasks := range ji.TaskStatusIndex {
if status == Pipelined {
occupied = occupied + len(tasks)
}
}
return int32(occupied)
}
// ValidTaskNum returns the number of tasks that are valid.
func (ji *JobInfo) ValidTaskNum() int32 {
occupied := 0
for status, tasks := range ji.TaskStatusIndex {
if AllocatedStatus(status) ||
status == Succeeded ||
status == Pipelined ||
status == Pending {
occupied = occupied + len(tasks)
}
}
return int32(occupied)
}
// Ready returns whether the job is ready to run.
func (ji *JobInfo) Ready() bool {
occupied := ji.ReadyTaskNum()
return occupied >= ji.MinAvailable
}
// Pipelined returns whether the number of ready and pipelined tasks is enough.
func (ji *JobInfo) Pipelined() bool {
occupied := ji.WaitingTaskNum() + ji.ReadyTaskNum()
return occupied >= ji.MinAvailable
}
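// Illustrative sketch, not from the original commit: the gang check an action
// can perform with the two predicates above; the returned decision names are
// placeholders.
func exampleGangDecision(job *JobInfo) string {
switch {
case job.Ready():
return "allocate" // enough tasks already hold resources
case job.Pipelined():
return "pipeline" // counting waiting tasks, MinAvailable is reachable
default:
return "wait"
}
}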

View File

@ -0,0 +1,195 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
v1 "k8s.io/api/core/v1"
)
// NodeInfo is node level aggregated information.
type NodeInfo struct {
Name string
Node *v1.Node
// The releasing resource on that node
Releasing *Resource
// The idle resource on that node
Idle *Resource
// The used resource on that node, including running and terminating
// pods
Used *Resource
Allocatable *Resource
Capability *Resource
Tasks map[TaskID]*TaskInfo
}
// NewNodeInfo is used to create a new nodeInfo object.
func NewNodeInfo(node *v1.Node) *NodeInfo {
if node == nil {
return &NodeInfo{
Releasing: EmptyResource(),
Idle: EmptyResource(),
Used: EmptyResource(),
Allocatable: EmptyResource(),
Capability: EmptyResource(),
Tasks: make(map[TaskID]*TaskInfo),
}
}
return &NodeInfo{
Name: node.Name,
Node: node,
Releasing: EmptyResource(),
Idle: NewResource(node.Status.Allocatable),
Used: EmptyResource(),
Allocatable: NewResource(node.Status.Allocatable),
Capability: NewResource(node.Status.Capacity),
Tasks: make(map[TaskID]*TaskInfo),
}
}
// Clone is used to clone the nodeInfo object.
func (ni *NodeInfo) Clone() *NodeInfo {
res := NewNodeInfo(ni.Node)
for _, p := range ni.Tasks {
res.AddTask(p)
}
return res
}
// SetNode sets the Kubernetes node object on the nodeInfo object.
func (ni *NodeInfo) SetNode(node *v1.Node) {
ni.Name = node.Name
ni.Node = node
ni.Allocatable = NewResource(node.Status.Allocatable)
ni.Capability = NewResource(node.Status.Capacity)
ni.Idle = NewResource(node.Status.Allocatable)
for _, task := range ni.Tasks {
if task.Status == Releasing {
ni.Releasing.Add(task.Resreq)
}
ni.Idle.Sub(task.Resreq)
ni.Used.Add(task.Resreq)
}
}
// AddTask is used to add a task to the nodeInfo object.
func (ni *NodeInfo) AddTask(task *TaskInfo) error {
key := PodKey(task.Pod)
if _, found := ni.Tasks[key]; found {
return fmt.Errorf("task <%v/%v> already on node <%v>",
task.Namespace, task.Name, ni.Name)
}
// The node holds a copy of the task so that later status
// changes do not impact the node's resource accounting.
ti := task.Clone()
if ni.Node != nil {
switch ti.Status {
case Releasing:
ni.Releasing.Add(ti.Resreq)
ni.Idle.Sub(ti.Resreq)
case Pipelined:
ni.Releasing.Sub(ti.Resreq)
default:
ni.Idle.Sub(ti.Resreq)
}
ni.Used.Add(ti.Resreq)
}
ni.Tasks[key] = ti
return nil
}
// RemoveTask is used to remove a task from the nodeInfo object.
func (ni *NodeInfo) RemoveTask(ti *TaskInfo) error {
key := PodKey(ti.Pod)
task, found := ni.Tasks[key]
if !found {
return fmt.Errorf("failed to find task <%v/%v> on host <%v>",
ti.Namespace, ti.Name, ni.Name)
}
if ni.Node != nil {
switch task.Status {
case Releasing:
ni.Releasing.Sub(task.Resreq)
ni.Idle.Add(task.Resreq)
case Pipelined:
ni.Releasing.Add(task.Resreq)
default:
ni.Idle.Add(task.Resreq)
}
ni.Used.Sub(task.Resreq)
}
delete(ni.Tasks, key)
return nil
}
// UpdateTask is used to update a task in nodeInfo object
func (ni *NodeInfo) UpdateTask(ti *TaskInfo) error {
if err := ni.RemoveTask(ti); err != nil {
return err
}
return ni.AddTask(ti)
}
// String returns nodeInfo details in string format
func (ni NodeInfo) String() string {
res := ""
i := 0
for _, task := range ni.Tasks {
res = res + fmt.Sprintf("\n\t %d: %v", i, task)
i++
}
return fmt.Sprintf("Node (%s): idle <%v>, used <%v>, releasing <%v>, taints <%v>%s",
ni.Name, ni.Idle, ni.Used, ni.Releasing, ni.Node.Spec.Taints, res)
}
// Pods returns all pods running on that node.
func (ni *NodeInfo) Pods() (pods []*v1.Pod) {
for _, t := range ni.Tasks {
pods = append(pods, t.Pod)
}
return
}
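// Illustrative sketch, not from the original commit: how AddTask and
// RemoveTask keep the accounting symmetric. The task t must wrap a real Pod,
// since PodKey dereferences t.Pod.
func exampleNodeAccounting(t *TaskInfo) error {
ni := NewNodeInfo(nil)
if err := ni.AddTask(t); err != nil {
return err
}
// With a nil *v1.Node only the task map changes; with a real node, Idle
// shrinks and Used grows by t.Resreq on add, and the reverse on remove.
return ni.RemoveTask(t)
}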

View File

@ -0,0 +1,73 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
v1 "k8s.io/api/core/v1"
)
// Refer k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go#GetResourceRequest.
//
// GetResourceRequest returns a *Resource that covers the largest width in each resource dimension.
// Because init-containers run sequentially, we collect the max in each dimension iteratively.
// In contrast, we sum the resource vectors for regular containers since they run simultaneously.
//
// To be consistent with the Kubernetes default scheduler, it is only used in the predicates of
// actions (e.g. allocate, backfill, preempt, reclaim); please use GetPodResourceWithoutInitContainers for other cases.
//
// Example:
//
// Pod:
// InitContainers
// IC1:
// CPU: 2
// Memory: 1G
// IC2:
// CPU: 2
// Memory: 3G
// Containers
// C1:
// CPU: 2
// Memory: 1G
// C2:
// CPU: 1
// Memory: 1G
//
// Result: CPU: 3, Memory: 3G
// GetPodResourceRequest returns all the resources required for the pod.
func GetPodResourceRequest(pod *v1.Pod) *Resource {
result := GetPodResourceWithoutInitContainers(pod)
// take max_resource(sum_pod, any_init_container)
for _, container := range pod.Spec.InitContainers {
result.SetMaxResource(NewResource(container.Resources.Requests))
}
return result
}
// GetPodResourceWithoutInitContainers returns the Pod's resource request; it does not include
// the init containers' resource requests.
func GetPodResourceWithoutInitContainers(pod *v1.Pod) *Resource {
result := EmptyResource()
for _, container := range pod.Spec.Containers {
result.Add(NewResource(container.Resources.Requests))
}
return result
}
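// Illustrative sketch, not from the original commit: the doc-comment example
// above, executed. Init containers of 2 CPU each and app containers of 2+1
// CPU yield max(2, 3) = 3 CPUs, so the two requests relate as checked here.
func examplePodRequest(pod *v1.Pod) bool {
withInit := GetPodResourceRequest(pod)
withoutInit := GetPodResourceWithoutInitContainers(pod)
// The init-aware request is always at least the plain container sum.
return withoutInit.LessEqual(withInit)
}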

View File

@ -0,0 +1,58 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"k8s.io/apimachinery/pkg/types"
arbcorev1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
)
// QueueID is a UID type that serves as the unique ID for each queue.
type QueueID types.UID
// QueueInfo holds all details about a queue.
type QueueInfo struct {
UID QueueID
Name string
Weight int32
Queue *arbcorev1.Queue
}
// NewQueueInfo creates a new queueInfo object.
func NewQueueInfo(queue *arbcorev1.Queue) *QueueInfo {
return &QueueInfo{
UID: QueueID(queue.Name),
Name: queue.Name,
Weight: queue.Spec.Weight,
Queue: queue,
}
}
// Clone is used to clone queueInfo object
func (q *QueueInfo) Clone() *QueueInfo {
return &QueueInfo{
UID: q.UID,
Name: q.Name,
Weight: q.Weight,
Queue: q.Queue,
}
}

View File

@ -0,0 +1,339 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
"math"
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/api/core/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)
// Resource defines all the resource types tracked by the scheduler.
type Resource struct {
MilliCPU float64
Memory float64
// ScalarResources
ScalarResources map[v1.ResourceName]float64
// MaxTaskNum is only used by predicates; it should NOT
// be accounted in other operators, e.g. Add.
MaxTaskNum int
}
const (
// GPUResourceName need to follow https://github.com/NVIDIA/k8s-device-plugin/blob/66a35b71ac4b5cbfb04714678b548bd77e5ba719/server.go#L20
GPUResourceName = "nvidia.com/gpu"
)
// EmptyResource creates and returns an empty resource object.
func EmptyResource() *Resource {
return &Resource{}
}
// Clone is used to clone a resource type
func (r *Resource) Clone() *Resource {
clone := &Resource{
MilliCPU: r.MilliCPU,
Memory: r.Memory,
MaxTaskNum: r.MaxTaskNum,
}
if r.ScalarResources != nil {
clone.ScalarResources = make(map[v1.ResourceName]float64)
for k, v := range r.ScalarResources {
clone.ScalarResources[k] = v
}
}
return clone
}
var minMilliCPU float64 = 10
var minMilliScalarResources float64 = 10
var minMemory float64 = 10 * 1024 * 1024
// NewResource creates a new resource object from a resource list.
func NewResource(rl v1.ResourceList) *Resource {
r := EmptyResource()
for rName, rQuant := range rl {
switch rName {
case v1.ResourceCPU:
r.MilliCPU += float64(rQuant.MilliValue())
case v1.ResourceMemory:
r.Memory += float64(rQuant.Value())
case v1.ResourcePods:
r.MaxTaskNum += int(rQuant.Value())
default:
if v1helper.IsScalarResourceName(rName) {
r.AddScalar(rName, float64(rQuant.MilliValue()))
}
}
}
return r
}
// IsEmpty reports whether every resource dimension is below its minimum possible value.
func (r *Resource) IsEmpty() bool {
if !(r.MilliCPU < minMilliCPU && r.Memory < minMemory) {
return false
}
for _, rQuant := range r.ScalarResources {
if rQuant >= minMilliScalarResources {
return false
}
}
return true
}
// IsZero checks whether the given resource dimension is below its minimum possible value.
func (r *Resource) IsZero(rn v1.ResourceName) bool {
switch rn {
case v1.ResourceCPU:
return r.MilliCPU < minMilliCPU
case v1.ResourceMemory:
return r.Memory < minMemory
default:
if r.ScalarResources == nil {
return true
}
if _, ok := r.ScalarResources[rn]; !ok {
panic("unknown resource")
}
return r.ScalarResources[rn] < minMilliScalarResources
}
}
// Add adds rr to r in place and returns r.
func (r *Resource) Add(rr *Resource) *Resource {
r.MilliCPU += rr.MilliCPU
r.Memory += rr.Memory
for rName, rQuant := range rr.ScalarResources {
if r.ScalarResources == nil {
r.ScalarResources = map[v1.ResourceName]float64{}
}
r.ScalarResources[rName] += rQuant
}
return r
}
// Sub subtracts rr from r in place; it panics if r has insufficient resources.
func (r *Resource) Sub(rr *Resource) *Resource {
if rr.LessEqual(r) {
r.MilliCPU -= rr.MilliCPU
r.Memory -= rr.Memory
for rrName, rrQuant := range rr.ScalarResources {
if r.ScalarResources == nil {
return r
}
r.ScalarResources[rrName] -= rrQuant
}
return r
}
panic(fmt.Errorf("Resource is not sufficient to do operation: <%v> sub <%v>",
r, rr))
}
// SetMaxResource compares r with rr and keeps the maximum value for each resource dimension.
func (r *Resource) SetMaxResource(rr *Resource) {
if r == nil || rr == nil {
return
}
if rr.MilliCPU > r.MilliCPU {
r.MilliCPU = rr.MilliCPU
}
if rr.Memory > r.Memory {
r.Memory = rr.Memory
}
for rrName, rrQuant := range rr.ScalarResources {
if r.ScalarResources == nil {
r.ScalarResources = make(map[v1.ResourceName]float64)
for k, v := range rr.ScalarResources {
r.ScalarResources[k] = v
}
return
}
if rrQuant > r.ScalarResources[rrName] {
r.ScalarResources[rrName] = rrQuant
}
}
}
// FitDelta computes the delta between a resource object representing available
// resources and an operand representing resources being requested. Any
// field that is less than 0 after the operation represents an
// insufficient resource.
func (r *Resource) FitDelta(rr *Resource) *Resource {
if rr.MilliCPU > 0 {
r.MilliCPU -= rr.MilliCPU + minMilliCPU
}
if rr.Memory > 0 {
r.Memory -= rr.Memory + minMemory
}
for rrName, rrQuant := range rr.ScalarResources {
if r.ScalarResources == nil {
r.ScalarResources = map[v1.ResourceName]float64{}
}
if rrQuant > 0 {
r.ScalarResources[rrName] -= rrQuant + minMilliScalarResources
}
}
return r
}
// Multi multiplies the resource by the provided ratio in place.
func (r *Resource) Multi(ratio float64) *Resource {
r.MilliCPU = r.MilliCPU * ratio
r.Memory = r.Memory * ratio
for rName, rQuant := range r.ScalarResources {
r.ScalarResources[rName] = rQuant * ratio
}
return r
}
// Less checks whether r is strictly less than rr in every dimension.
func (r *Resource) Less(rr *Resource) bool {
if !(r.MilliCPU < rr.MilliCPU && r.Memory < rr.Memory) {
return false
}
if r.ScalarResources == nil {
if rr.ScalarResources == nil {
return false
}
return true
}
for rName, rQuant := range r.ScalarResources {
if rr.ScalarResources == nil {
return false
}
rrQuant := rr.ScalarResources[rName]
if rQuant >= rrQuant {
return false
}
}
return true
}
// LessEqual checks whether r is less than or approximately equal to rr, within the minimum-value tolerances.
func (r *Resource) LessEqual(rr *Resource) bool {
isLess := (r.MilliCPU < rr.MilliCPU || math.Abs(rr.MilliCPU-r.MilliCPU) < minMilliCPU) &&
(r.Memory < rr.Memory || math.Abs(rr.Memory-r.Memory) < minMemory)
if !isLess {
return false
}
if r.ScalarResources == nil {
return true
}
for rName, rQuant := range r.ScalarResources {
if rr.ScalarResources == nil {
return false
}
rrQuant := rr.ScalarResources[rName]
if !(rQuant < rrQuant || math.Abs(rrQuant-rQuant) < minMilliScalarResources) {
return false
}
}
return true
}
// String returns resource details in string format
func (r *Resource) String() string {
str := fmt.Sprintf("cpu %0.2f, memory %0.2f", r.MilliCPU, r.Memory)
for rName, rQuant := range r.ScalarResources {
str = fmt.Sprintf("%s, %s %0.2f", str, rName, rQuant)
}
return str
}
// Get returns the resource value for that particular resource type
func (r *Resource) Get(rn v1.ResourceName) float64 {
switch rn {
case v1.ResourceCPU:
return r.MilliCPU
case v1.ResourceMemory:
return r.Memory
default:
if r.ScalarResources == nil {
return 0
}
return r.ScalarResources[rn]
}
}
// ResourceNames returns all resource types
func (r *Resource) ResourceNames() []v1.ResourceName {
resNames := []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory}
for rName := range r.ScalarResources {
resNames = append(resNames, rName)
}
return resNames
}
// AddScalar adds a resource by a scalar value of this resource.
func (r *Resource) AddScalar(name v1.ResourceName, quantity float64) {
r.SetScalar(name, r.ScalarResources[name]+quantity)
}
// SetScalar sets a resource by a scalar value of this resource.
func (r *Resource) SetScalar(name v1.ResourceName, quantity float64) {
// Lazily allocate scalar resource map.
if r.ScalarResources == nil {
r.ScalarResources = map[v1.ResourceName]float64{}
}
r.ScalarResources[name] = quantity
}
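// Convert2K8sResource converts a Resource back into a Kubernetes ResourceList.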
func (r *Resource) Convert2K8sResource() *v1.ResourceList {
list := v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(int64(r.MilliCPU), resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(int64(r.Memory), resource.BinarySI),
}
for name, value := range r.ScalarResources {
list[name] = *resource.NewQuantity(int64(value), resource.BinarySI)
}
return &list
}
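// Illustrative sketch, not from the original commit: the guard callers use
// before Sub, since Sub panics when the receiver has insufficient resources.
func exampleSafeSub(total, used *Resource) (*Resource, bool) {
if !used.LessEqual(total) {
return total, false
}
return total.Clone().Sub(used), true
}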

View File

@ -0,0 +1,132 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
"reflect"
v1 "k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
func nodesEqual(l, r map[string]*NodeInfo) bool {
if len(l) != len(r) {
return false
}
for k, n := range l {
if !reflect.DeepEqual(n, r[k]) {
return false
}
}
return true
}
func podsEqual(l, r map[string]*TaskInfo) bool {
if len(l) != len(r) {
return false
}
for k, p := range l {
if !reflect.DeepEqual(p, r[k]) {
return false
}
}
return true
}
func buildNode(name string, alloc v1.ResourceList) *v1.Node {
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Status: v1.NodeStatus{
Capacity: alloc,
Allocatable: alloc,
},
}
}
func buildPod(ns, n, nn string, p v1.PodPhase, req v1.ResourceList, owner []metav1.OwnerReference, labels map[string]string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(fmt.Sprintf("%v-%v", ns, n)),
Name: n,
Namespace: ns,
OwnerReferences: owner,
Labels: labels,
},
Status: v1.PodStatus{
Phase: p,
},
Spec: v1.PodSpec{
NodeName: nn,
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: req,
},
},
},
},
}
}
func buildPdb(n string, min int, selectorMap map[string]string) *v1beta1.PodDisruptionBudget {
selector := &metav1.LabelSelector{
MatchLabels: selectorMap,
}
minAvailable := intstr.FromInt(min)
return &v1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: n,
},
Spec: v1beta1.PodDisruptionBudgetSpec{
Selector: selector,
MinAvailable: &minAvailable,
},
}
}
func buildResourceList(cpu string, memory string) v1.ResourceList {
return v1.ResourceList{
v1.ResourceCPU: resource.MustParse(cpu),
v1.ResourceMemory: resource.MustParse(memory),
}
}
func buildResource(cpu string, memory string) *Resource {
return NewResource(v1.ResourceList{
v1.ResourceCPU: resource.MustParse(cpu),
v1.ResourceMemory: resource.MustParse(memory),
})
}
func buildOwnerReference(owner string) metav1.OwnerReference {
controller := true
return metav1.OwnerReference{
Controller: &controller,
UID: types.UID(owner),
}
}

View File

@ -0,0 +1,108 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
// TaskStatus defines the status of a task/pod.
type TaskStatus int
const (
// Pending means the task is pending in the apiserver.
Pending TaskStatus = 1 << iota
// Allocated means the scheduler assigns a host to it.
Allocated
// Pipelined means the scheduler assigns a host to wait for releasing resource.
Pipelined
// Binding means the scheduler send Bind request to apiserver.
Binding
// Bound means the task/Pod bounds to a host.
Bound
// Running means a task is running on the host.
Running
// Releasing means a task/pod is deleted.
Releasing
// Succeeded means that all containers in the pod have voluntarily terminated
// with a container exit code of 0, and the system is not going to restart any of these containers.
Succeeded
// Failed means that all containers in the pod have terminated, and at least one container has
// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
Failed
// Unknown means the status of task/pod is unknown to the scheduler.
Unknown
)
func (ts TaskStatus) String() string {
switch ts {
case Pending:
return "Pending"
case Binding:
return "Binding"
case Bound:
return "Bound"
case Running:
return "Running"
case Releasing:
return "Releasing"
case Succeeded:
return "Succeeded"
case Failed:
return "Failed"
default:
return "Unknown"
}
}
// validateStatusUpdate validates whether the status transition is valid; currently all transitions are allowed.
func validateStatusUpdate(oldStatus, newStatus TaskStatus) error {
return nil
}
// LessFn is the func declaration used by sort or priority queue.
type LessFn func(interface{}, interface{}) bool
// CompareFn is the func declaration used by sort or priority queue.
type CompareFn func(interface{}, interface{}) int
// ValidateFn is the func declaration used to check object's status.
type ValidateFn func(interface{}) bool
// ValidateResult is a struct that captures the result of a validation.
type ValidateResult struct {
Pass bool
Reason string
Message string
}
// ValidateExFn is the func declaration used to validate an object and return a detailed result.
type ValidateExFn func(interface{}) *ValidateResult
// PredicateFn is the func declaration used to predicate node for task.
type PredicateFn func(*TaskInfo, *NodeInfo) error
// EvictableFn is the func declaration used to evict tasks.
type EvictableFn func(*TaskInfo, []*TaskInfo) []*TaskInfo
// NodeOrderFn is the func declaration used to get priority score for a node for a particular task.
type NodeOrderFn func(*TaskInfo, *NodeInfo) (float64, error)

View File

@ -0,0 +1,665 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"strings"
"sync"
"time"
"github.com/golang/glog"
v1 "k8s.io/api/core/v1"
"k8s.io/api/scheduling/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
infov1 "k8s.io/client-go/informers/core/v1"
policyv1 "k8s.io/client-go/informers/policy/v1beta1"
schedv1 "k8s.io/client-go/informers/scheduling/v1beta1"
storagev1 "k8s.io/client-go/informers/storage/v1"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
"github.com/kubernetes-sigs/kube-batch/cmd/kube-batch/app/options"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
kbver "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
"github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme"
kbschema "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme"
kbinfo "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions"
kbinfov1 "github.com/kubernetes-sigs/kube-batch/pkg/client/informers/externalversions/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
kbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
func init() {
schemeBuilder := runtime.SchemeBuilder{
v1.AddToScheme,
}
utilruntime.Must(schemeBuilder.AddToScheme(kbschema.Scheme))
}
// New returns a Cache implementation.
func New(config *rest.Config, schedulerName string, defaultQueue string) Cache {
return newSchedulerCache(config, schedulerName, defaultQueue)
}
// SchedulerCache is the scheduler cache for kube-batch.
type SchedulerCache struct {
sync.Mutex
kubeclient *kubernetes.Clientset
kbclient *kbver.Clientset
defaultQueue string
// schedulerName is the name of the kube-batch scheduler
schedulerName string
podInformer infov1.PodInformer
nodeInformer infov1.NodeInformer
pdbInformer policyv1.PodDisruptionBudgetInformer
nsInformer infov1.NamespaceInformer
podGroupInformer kbinfov1.PodGroupInformer
queueInformer kbinfov1.QueueInformer
pvInformer infov1.PersistentVolumeInformer
pvcInformer infov1.PersistentVolumeClaimInformer
scInformer storagev1.StorageClassInformer
pcInformer schedv1.PriorityClassInformer
Binder Binder
Evictor Evictor
StatusUpdater StatusUpdater
VolumeBinder VolumeBinder
Recorder record.EventRecorder
Jobs map[kbapi.JobID]*kbapi.JobInfo
Nodes map[string]*kbapi.NodeInfo
Queues map[kbapi.QueueID]*kbapi.QueueInfo
PriorityClasses map[string]*v1beta1.PriorityClass
defaultPriorityClass *v1beta1.PriorityClass
defaultPriority int32
errTasks workqueue.RateLimitingInterface
deletedJobs workqueue.RateLimitingInterface
}
type defaultBinder struct {
kubeclient *kubernetes.Clientset
}
// Bind sends a bind request to the API server.
func (db *defaultBinder) Bind(p *v1.Pod, hostname string) error {
if err := db.kubeclient.CoreV1().Pods(p.Namespace).Bind(&v1.Binding{
ObjectMeta: metav1.ObjectMeta{Namespace: p.Namespace, Name: p.Name, UID: p.UID},
Target: v1.ObjectReference{
Kind: "Node",
Name: hostname,
},
}); err != nil {
glog.Errorf("Failed to bind pod <%v/%v>: %#v", p.Namespace, p.Name, err)
return err
}
return nil
}
type defaultEvictor struct {
kubeclient *kubernetes.Clientset
}
// Evict sends a delete-pod request to the API server.
func (de *defaultEvictor) Evict(p *v1.Pod) error {
glog.V(3).Infof("Evicting pod %v/%v", p.Namespace, p.Name)
if err := de.kubeclient.CoreV1().Pods(p.Namespace).Delete(p.Name, nil); err != nil {
glog.Errorf("Failed to evict pod <%v/%v>: %#v", p.Namespace, p.Name, err)
return err
}
return nil
}
// defaultStatusUpdater is the default implementation of the StatusUpdater interface
type defaultStatusUpdater struct {
kubeclient *kubernetes.Clientset
kbclient *kbver.Clientset
}
// UpdatePodCondition updates the pod's status with the given pod condition.
func (su *defaultStatusUpdater) UpdatePodCondition(pod *v1.Pod, condition *v1.PodCondition) (*v1.Pod, error) {
glog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
if podutil.UpdatePodCondition(&pod.Status, condition) {
return su.kubeclient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
}
return pod, nil
}
// UpdatePodGroup updates the given PodGroup through the kube-batch client.
func (su *defaultStatusUpdater) UpdatePodGroup(pg *v1alpha1.PodGroup) (*v1alpha1.PodGroup, error) {
return su.kbclient.SchedulingV1alpha1().PodGroups(pg.Namespace).Update(pg)
}
type defaultVolumeBinder struct {
volumeBinder *volumebinder.VolumeBinder
}
// AllocateVolumes allocates volume on the host to the task
func (dvb *defaultVolumeBinder) AllocateVolumes(task *api.TaskInfo, hostname string) error {
allBound, err := dvb.volumeBinder.Binder.AssumePodVolumes(task.Pod, hostname)
task.VolumeReady = allBound
return err
}
// BindVolumes binds volumes to the task
func (dvb *defaultVolumeBinder) BindVolumes(task *api.TaskInfo) error {
// If the task's volumes are already ready, do not bind them again.
if task.VolumeReady {
return nil
}
return dvb.volumeBinder.Binder.BindPodVolumes(task.Pod)
}
func newSchedulerCache(config *rest.Config, schedulerName string, defaultQueue string) *SchedulerCache {
sc := &SchedulerCache{
Jobs: make(map[kbapi.JobID]*kbapi.JobInfo),
Nodes: make(map[string]*kbapi.NodeInfo),
Queues: make(map[kbapi.QueueID]*kbapi.QueueInfo),
PriorityClasses: make(map[string]*v1beta1.PriorityClass),
errTasks: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
deletedJobs: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
kubeclient: kubernetes.NewForConfigOrDie(config),
kbclient: kbver.NewForConfigOrDie(config),
defaultQueue: defaultQueue,
schedulerName: schedulerName,
}
// Prepare event clients.
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: sc.kubeclient.CoreV1().Events("")})
sc.Recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kube-batch"})
sc.Binder = &defaultBinder{
kubeclient: sc.kubeclient,
}
sc.Evictor = &defaultEvictor{
kubeclient: sc.kubeclient,
}
sc.StatusUpdater = &defaultStatusUpdater{
kubeclient: sc.kubeclient,
kbclient: sc.kbclient,
}
informerFactory := informers.NewSharedInformerFactory(sc.kubeclient, 0)
sc.pvcInformer = informerFactory.Core().V1().PersistentVolumeClaims()
sc.pvInformer = informerFactory.Core().V1().PersistentVolumes()
sc.scInformer = informerFactory.Storage().V1().StorageClasses()
sc.VolumeBinder = &defaultVolumeBinder{
volumeBinder: volumebinder.NewVolumeBinder(
sc.kubeclient,
sc.pvcInformer,
sc.pvInformer,
sc.scInformer,
30*time.Second,
),
}
// create informer for node information
sc.nodeInformer = informerFactory.Core().V1().Nodes()
sc.nodeInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddNode,
UpdateFunc: sc.UpdateNode,
DeleteFunc: sc.DeleteNode,
},
0,
)
// create informer for pod information
sc.podInformer = informerFactory.Core().V1().Pods()
sc.podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch pod := obj.(type) {
case *v1.Pod:
if strings.Compare(pod.Spec.SchedulerName, schedulerName) == 0 && pod.Status.Phase == v1.PodPending {
return true
}
return pod.Status.Phase != v1.PodPending
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddPod,
UpdateFunc: sc.UpdatePod,
DeleteFunc: sc.DeletePod,
},
})
sc.pdbInformer = informerFactory.Policy().V1beta1().PodDisruptionBudgets()
sc.pdbInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddPDB,
UpdateFunc: sc.UpdatePDB,
DeleteFunc: sc.DeletePDB,
})
sc.pcInformer = informerFactory.Scheduling().V1beta1().PriorityClasses()
sc.pcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddPriorityClass,
UpdateFunc: sc.UpdatePriorityClass,
DeleteFunc: sc.DeletePriorityClass,
})
kbinformer := kbinfo.NewSharedInformerFactory(sc.kbclient, 0)
// create informer for PodGroup information
sc.podGroupInformer = kbinformer.Scheduling().V1alpha1().PodGroups()
sc.podGroupInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddPodGroup,
UpdateFunc: sc.UpdatePodGroup,
DeleteFunc: sc.DeletePodGroup,
})
// create informer for Queue information
sc.queueInformer = kbinformer.Scheduling().V1alpha1().Queues()
sc.queueInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddQueue,
UpdateFunc: sc.UpdateQueue,
DeleteFunc: sc.DeleteQueue,
})
return sc
}
// Run starts the schedulerCache
func (sc *SchedulerCache) Run(stopCh <-chan struct{}) {
go sc.pdbInformer.Informer().Run(stopCh)
go sc.podInformer.Informer().Run(stopCh)
go sc.nodeInformer.Informer().Run(stopCh)
go sc.podGroupInformer.Informer().Run(stopCh)
go sc.pvInformer.Informer().Run(stopCh)
go sc.pvcInformer.Informer().Run(stopCh)
go sc.scInformer.Informer().Run(stopCh)
go sc.queueInformer.Informer().Run(stopCh)
if options.ServerOpts.EnablePriorityClass {
go sc.pcInformer.Informer().Run(stopCh)
}
// Re-sync error tasks.
go wait.Until(sc.processResyncTask, 0, stopCh)
// Cleanup jobs.
go wait.Until(sc.processCleanupJob, 0, stopCh)
}
// WaitForCacheSync waits until the cache has synced with the API server.
func (sc *SchedulerCache) WaitForCacheSync(stopCh <-chan struct{}) bool {
return cache.WaitForCacheSync(stopCh,
func() []cache.InformerSynced {
informerSynced := []cache.InformerSynced{
sc.pdbInformer.Informer().HasSynced,
sc.podInformer.Informer().HasSynced,
sc.podGroupInformer.Informer().HasSynced,
sc.nodeInformer.Informer().HasSynced,
sc.pvInformer.Informer().HasSynced,
sc.pvcInformer.Informer().HasSynced,
sc.scInformer.Informer().HasSynced,
sc.queueInformer.Informer().HasSynced,
}
if options.ServerOpts.EnablePriorityClass {
informerSynced = append(informerSynced, sc.pcInformer.Informer().HasSynced)
}
return informerSynced
}()...,
)
}
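// Illustrative sketch, not from the original commit: the startup sequence a
// caller is expected to follow; the stop-channel wiring is an assumption.
func exampleStartCache(sc *SchedulerCache, stopCh <-chan struct{}) bool {
sc.Run(stopCh)
// Block until every informer has completed its initial list.
return sc.WaitForCacheSync(stopCh)
}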
// findJobAndTask returns the job and task info for the given task.
func (sc *SchedulerCache) findJobAndTask(taskInfo *kbapi.TaskInfo) (*kbapi.JobInfo, *kbapi.TaskInfo, error) {
job, found := sc.Jobs[taskInfo.Job]
if !found {
return nil, nil, fmt.Errorf("failed to find Job %v for Task %v",
taskInfo.Job, taskInfo.UID)
}
task, found := job.Tasks[taskInfo.UID]
if !found {
return nil, nil, fmt.Errorf("failed to find task in status %v by id %v",
taskInfo.Status, taskInfo.UID)
}
return job, task, nil
}
// Evict evicts the pod of the given task.
func (sc *SchedulerCache) Evict(taskInfo *kbapi.TaskInfo, reason string) error {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
job, task, err := sc.findJobAndTask(taskInfo)
if err != nil {
return err
}
node, found := sc.Nodes[task.NodeName]
if !found {
return fmt.Errorf("failed to bind Task %v to host %v, host does not exist",
task.UID, task.NodeName)
}
err = job.UpdateTaskStatus(task, kbapi.Releasing)
if err != nil {
return err
}
// Update the task on the node.
if err := node.UpdateTask(task); err != nil {
return err
}
p := task.Pod
go func() {
err := sc.Evictor.Evict(p)
if err != nil {
sc.resyncTask(task)
}
}()
if !shadowPodGroup(job.PodGroup) {
sc.Recorder.Eventf(job.PodGroup, v1.EventTypeNormal, "Evict", reason)
}
return nil
}
// Bind binds task to the target host.
func (sc *SchedulerCache) Bind(taskInfo *kbapi.TaskInfo, hostname string) error {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
job, task, err := sc.findJobAndTask(taskInfo)
if err != nil {
return err
}
node, found := sc.Nodes[hostname]
if !found {
return fmt.Errorf("failed to bind Task %v to host %v, host does not exist",
task.UID, hostname)
}
err = job.UpdateTaskStatus(task, kbapi.Binding)
if err != nil {
return err
}
// Set `.nodeName` to the hostname
task.NodeName = hostname
// Add task to the node.
if err := node.AddTask(task); err != nil {
return err
}
p := task.Pod
go func() {
if err := sc.Binder.Bind(p, hostname); err != nil {
sc.resyncTask(task)
} else {
sc.Recorder.Eventf(p, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v/%v to %v", p.Namespace, p.Name, hostname)
}
}()
return nil
}
// AllocateVolumes allocates volume on the host to the task
func (sc *SchedulerCache) AllocateVolumes(task *api.TaskInfo, hostname string) error {
return sc.VolumeBinder.AllocateVolumes(task, hostname)
}
// BindVolumes binds volumes to the task
func (sc *SchedulerCache) BindVolumes(task *api.TaskInfo) error {
return sc.VolumeBinder.BindVolumes(task)
}
// taskUnschedulable updates the pod status of an unschedulable task.
func (sc *SchedulerCache) taskUnschedulable(task *api.TaskInfo, message string) error {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
pod := task.Pod.DeepCopy()
sc.Recorder.Eventf(pod, v1.EventTypeWarning, string(v1.PodReasonUnschedulable), message)
if _, err := sc.StatusUpdater.UpdatePodCondition(pod, &v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionFalse,
Reason: v1.PodReasonUnschedulable,
Message: message,
}); err != nil {
return err
}
return nil
}
func (sc *SchedulerCache) deleteJob(job *kbapi.JobInfo) {
glog.V(3).Infof("Try to delete Job <%v:%v/%v>", job.UID, job.Namespace, job.Name)
sc.deletedJobs.AddRateLimited(job)
}
func (sc *SchedulerCache) processCleanupJob() {
obj, shutdown := sc.deletedJobs.Get()
if shutdown {
return
}
defer sc.deletedJobs.Done(obj)
job, found := obj.(*kbapi.JobInfo)
if !found {
glog.Errorf("Failed to convert <%v> to *JobInfo", obj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
if kbapi.JobTerminated(job) {
delete(sc.Jobs, job.UID)
glog.V(3).Infof("Job <%v:%v/%v> was deleted.", job.UID, job.Namespace, job.Name)
} else {
// Retry
sc.deleteJob(job)
}
}
func (sc *SchedulerCache) resyncTask(task *kbapi.TaskInfo) {
sc.errTasks.AddRateLimited(task)
}
func (sc *SchedulerCache) processResyncTask() {
obj, shutdown := sc.errTasks.Get()
if shutdown {
return
}
defer sc.errTasks.Done(obj)
task, ok := obj.(*kbapi.TaskInfo)
if !ok {
glog.Errorf("failed to convert %v to *v1.Pod", obj)
return
}
if err := sc.syncTask(task); err != nil {
glog.Errorf("Failed to sync pod <%v/%v>, retry it.", task.Namespace, task.Name)
sc.resyncTask(task)
}
}
// Snapshot returns the complete snapshot of the cluster from cache
func (sc *SchedulerCache) Snapshot() *kbapi.ClusterInfo {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
snapshot := &kbapi.ClusterInfo{
Nodes: make(map[string]*kbapi.NodeInfo),
Jobs: make(map[kbapi.JobID]*kbapi.JobInfo),
Queues: make(map[kbapi.QueueID]*kbapi.QueueInfo),
}
for _, value := range sc.Nodes {
snapshot.Nodes[value.Name] = value.Clone()
}
for _, value := range sc.Queues {
snapshot.Queues[value.UID] = value.Clone()
}
for _, value := range sc.Jobs {
// If there is no scheduling spec, do not handle it.
if value.PodGroup == nil && value.PDB == nil {
glog.V(4).Infof("The scheduling spec of Job <%v:%s/%s> is nil, ignore it.",
value.UID, value.Namespace, value.Name)
continue
}
if _, found := snapshot.Queues[value.Queue]; !found {
glog.V(3).Infof("The Queue <%v> of Job <%v/%v> does not exist, ignore it.",
value.Queue, value.Namespace, value.Name)
continue
}
if value.PodGroup != nil {
value.Priority = sc.defaultPriority
priName := value.PodGroup.Spec.PriorityClassName
if priorityClass, found := sc.PriorityClasses[priName]; found {
value.Priority = priorityClass.Value
}
glog.V(4).Infof("The priority of job <%s/%s> is <%s/%d>",
value.Namespace, value.Name, priName, value.Priority)
}
snapshot.Jobs[value.UID] = value.Clone()
}
glog.V(3).Infof("There are <%d> Jobs, <%d> Queues and <%d> Nodes in total for scheduling.",
len(snapshot.Jobs), len(snapshot.Queues), len(snapshot.Nodes))
return snapshot
}
// String returns information about the cache in a string format
func (sc *SchedulerCache) String() string {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
str := "Cache:\n"
if len(sc.Nodes) != 0 {
str = str + "Nodes:\n"
for _, n := range sc.Nodes {
str = str + fmt.Sprintf("\t %s: idle(%v) used(%v) allocatable(%v) pods(%d)\n",
n.Name, n.Idle, n.Used, n.Allocatable, len(n.Tasks))
i := 0
for _, p := range n.Tasks {
str = str + fmt.Sprintf("\t\t %d: %v\n", i, p)
i++
}
}
}
if len(sc.Jobs) != 0 {
str = str + "Jobs:\n"
for _, job := range sc.Jobs {
str = str + fmt.Sprintf("\t %s\n", job)
}
}
return str
}
// RecordJobStatusEvent records related events according to job status.
func (sc *SchedulerCache) RecordJobStatusEvent(job *kbapi.JobInfo) {
jobErrMsg := job.FitError()
if !shadowPodGroup(job.PodGroup) {
pgUnschedulable := job.PodGroup != nil &&
(job.PodGroup.Status.Phase == v1alpha1.PodGroupUnknown ||
job.PodGroup.Status.Phase == v1alpha1.PodGroupPending)
pdbUnschedulable := job.PDB != nil && len(job.TaskStatusIndex[api.Pending]) != 0
// If pending or unschedulable, record an unschedulable event.
if pgUnschedulable || pdbUnschedulable {
msg := fmt.Sprintf("%v/%v tasks in gang unschedulable: %v",
len(job.TaskStatusIndex[api.Pending]), len(job.Tasks), job.FitError())
sc.Recorder.Eventf(job.PodGroup, v1.EventTypeWarning,
string(v1alpha1.PodGroupUnschedulableType), msg)
}
}
// Update podCondition for tasks Allocated and Pending before job discarded
for _, status := range []api.TaskStatus{api.Allocated, api.Pending} {
for _, taskInfo := range job.TaskStatusIndex[status] {
if err := sc.taskUnschedulable(taskInfo, jobErrMsg); err != nil {
glog.Errorf("Failed to update unschedulable task status <%s/%s>: %v",
taskInfo.Namespace, taskInfo.Name, err)
}
}
}
}
// UpdateJobStatus update the status of job and its tasks.
func (sc *SchedulerCache) UpdateJobStatus(job *kbapi.JobInfo) (*kbapi.JobInfo, error) {
if !shadowPodGroup(job.PodGroup) {
pg, err := sc.StatusUpdater.UpdatePodGroup(job.PodGroup)
if err != nil {
return nil, err
}
job.PodGroup = pg
}
sc.RecordJobStatusEvent(job)
return job, nil
}

View File

@ -0,0 +1,795 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"reflect"
"github.com/golang/glog"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1beta1"
"k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/utils"
kbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
func isTerminated(status kbapi.TaskStatus) bool {
return status == kbapi.Succeeded || status == kbapi.Failed
}
// getOrCreateJob returns the corresponding Job for pi if it exists, or creates and returns a Job
// if pi.Pod.Spec.SchedulerName matches this scheduler's name; otherwise it returns nil.
func (sc *SchedulerCache) getOrCreateJob(pi *kbapi.TaskInfo) *kbapi.JobInfo {
if len(pi.Job) == 0 {
if pi.Pod.Spec.SchedulerName != sc.schedulerName {
glog.V(4).Infof("Pod %s/%s will not not scheduled by %s, skip creating PodGroup and Job for it",
pi.Pod.Namespace, pi.Pod.Name, sc.schedulerName)
return nil
}
pb := createShadowPodGroup(pi.Pod)
pi.Job = kbapi.JobID(pb.Name)
if _, found := sc.Jobs[pi.Job]; !found {
job := kbapi.NewJobInfo(pi.Job)
job.SetPodGroup(pb)
// Set default queue for shadow podgroup.
job.Queue = kbapi.QueueID(sc.defaultQueue)
sc.Jobs[pi.Job] = job
}
} else {
if _, found := sc.Jobs[pi.Job]; !found {
sc.Jobs[pi.Job] = kbapi.NewJobInfo(pi.Job)
}
}
return sc.Jobs[pi.Job]
}
func (sc *SchedulerCache) addTask(pi *kbapi.TaskInfo) error {
job := sc.getOrCreateJob(pi)
if job != nil {
job.AddTaskInfo(pi)
}
if len(pi.NodeName) != 0 {
if _, found := sc.Nodes[pi.NodeName]; !found {
sc.Nodes[pi.NodeName] = kbapi.NewNodeInfo(nil)
}
node := sc.Nodes[pi.NodeName]
if !isTerminated(pi.Status) {
return node.AddTask(pi)
}
}
return nil
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) addPod(pod *v1.Pod) error {
pi := kbapi.NewTaskInfo(pod)
return sc.addTask(pi)
}
func (sc *SchedulerCache) syncTask(oldTask *kbapi.TaskInfo) error {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
newPod, err := sc.kubeclient.CoreV1().Pods(oldTask.Namespace).Get(oldTask.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
sc.deleteTask(oldTask)
glog.V(3).Infof("Pod <%v/%v> was deleted, removed from cache.", oldTask.Namespace, oldTask.Name)
return nil
}
return fmt.Errorf("failed to get Pod <%v/%v>: err %v", oldTask.Namespace, oldTask.Name, err)
}
newTask := kbapi.NewTaskInfo(newPod)
return sc.updateTask(oldTask, newTask)
}
func (sc *SchedulerCache) updateTask(oldTask, newTask *kbapi.TaskInfo) error {
if err := sc.deleteTask(oldTask); err != nil {
return err
}
return sc.addTask(newTask)
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) updatePod(oldPod, newPod *v1.Pod) error {
if err := sc.deletePod(oldPod); err != nil {
return err
}
return sc.addPod(newPod)
}
func (sc *SchedulerCache) deleteTask(pi *kbapi.TaskInfo) error {
var jobErr, nodeErr error
if len(pi.Job) != 0 {
if job, found := sc.Jobs[pi.Job]; found {
jobErr = job.DeleteTaskInfo(pi)
} else {
jobErr = fmt.Errorf("failed to find Job <%v> for Task %v/%v",
pi.Job, pi.Namespace, pi.Name)
}
}
if len(pi.NodeName) != 0 {
node := sc.Nodes[pi.NodeName]
if node != nil {
nodeErr = node.RemoveTask(pi)
}
}
if jobErr != nil || nodeErr != nil {
return kbapi.MergeErrors(jobErr, nodeErr)
}
return nil
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) deletePod(pod *v1.Pod) error {
pi := kbapi.NewTaskInfo(pod)
// Delete the Task in cache to handle Binding status.
task := pi
if job, found := sc.Jobs[pi.Job]; found {
if t, found := job.Tasks[pi.UID]; found {
task = t
}
}
if err := sc.deleteTask(task); err != nil {
return err
}
// If job was terminated, delete it.
if job, found := sc.Jobs[pi.Job]; found && kbapi.JobTerminated(job) {
sc.deleteJob(job)
}
return nil
}
// AddPod adds a pod to the scheduler cache
func (sc *SchedulerCache) AddPod(obj interface{}) {
pod, ok := obj.(*v1.Pod)
if !ok {
glog.Errorf("Cannot convert to *v1.Pod: %v", obj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.addPod(pod)
if err != nil {
glog.Errorf("Failed to add pod <%s/%s> into cache: %v",
pod.Namespace, pod.Name, err)
return
}
glog.V(3).Infof("Added pod <%s/%v> into cache.", pod.Namespace, pod.Name)
return
}
// UpdatePod updates a pod in the scheduler cache
func (sc *SchedulerCache) UpdatePod(oldObj, newObj interface{}) {
oldPod, ok := oldObj.(*v1.Pod)
if !ok {
glog.Errorf("Cannot convert oldObj to *v1.Pod: %v", oldObj)
return
}
newPod, ok := newObj.(*v1.Pod)
if !ok {
glog.Errorf("Cannot convert newObj to *v1.Pod: %v", newObj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.updatePod(oldPod, newPod)
if err != nil {
glog.Errorf("Failed to update pod %v in cache: %v", oldPod.Name, err)
return
}
glog.V(3).Infof("Updated pod <%s/%v> in cache.", oldPod.Namespace, oldPod.Name)
return
}
// DeletePod deletes a pod from the scheduler cache
func (sc *SchedulerCache) DeletePod(obj interface{}) {
var pod *v1.Pod
switch t := obj.(type) {
case *v1.Pod:
pod = t
case cache.DeletedFinalStateUnknown:
var ok bool
pod, ok = t.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Cannot convert to *v1.Pod: %v", t.Obj)
return
}
default:
glog.Errorf("Cannot convert to *v1.Pod: %v", t)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.deletePod(pod)
if err != nil {
glog.Errorf("Failed to delete pod %v from cache: %v", pod.Name, err)
return
}
glog.V(3).Infof("Deleted pod <%s/%v> from cache.", pod.Namespace, pod.Name)
return
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) addNode(node *v1.Node) error {
if sc.Nodes[node.Name] != nil {
sc.Nodes[node.Name].SetNode(node)
} else {
sc.Nodes[node.Name] = kbapi.NewNodeInfo(node)
}
return nil
}
func isNodeInfoUpdated(oldNode, newNode *v1.Node) bool {
return !reflect.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable) ||
!reflect.DeepEqual(oldNode.Spec.Taints, newNode.Spec.Taints) ||
!reflect.DeepEqual(oldNode.Labels, newNode.Labels) ||
!reflect.DeepEqual(oldNode.Spec.Unschedulable, newNode.Spec.Unschedulable)
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) updateNode(oldNode, newNode *v1.Node) error {
if sc.Nodes[newNode.Name] != nil {
if isNodeInfoUpdated(oldNode, newNode) {
sc.Nodes[newNode.Name].SetNode(newNode)
}
return nil
}
return fmt.Errorf("node <%s> does not exist", newNode.Name)
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) deleteNode(node *v1.Node) error {
if _, ok := sc.Nodes[node.Name]; !ok {
return fmt.Errorf("node <%s> does not exist", node.Name)
}
delete(sc.Nodes, node.Name)
return nil
}
// AddNode adds a node to the scheduler cache
func (sc *SchedulerCache) AddNode(obj interface{}) {
node, ok := obj.(*v1.Node)
if !ok {
glog.Errorf("Cannot convert to *v1.Node: %v", obj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.addNode(node)
if err != nil {
glog.Errorf("Failed to add node %s into cache: %v", node.Name, err)
return
}
return
}
// UpdateNode updates a node in the scheduler cache
func (sc *SchedulerCache) UpdateNode(oldObj, newObj interface{}) {
oldNode, ok := oldObj.(*v1.Node)
if !ok {
glog.Errorf("Cannot convert oldObj to *v1.Node: %v", oldObj)
return
}
newNode, ok := newObj.(*v1.Node)
if !ok {
glog.Errorf("Cannot convert newObj to *v1.Node: %v", newObj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.updateNode(oldNode, newNode)
if err != nil {
glog.Errorf("Failed to update node %v in cache: %v", oldNode.Name, err)
return
}
return
}
// DeleteNode deletes a node from the scheduler cache
func (sc *SchedulerCache) DeleteNode(obj interface{}) {
var node *v1.Node
switch t := obj.(type) {
case *v1.Node:
node = t
case cache.DeletedFinalStateUnknown:
var ok bool
node, ok = t.Obj.(*v1.Node)
if !ok {
glog.Errorf("Cannot convert to *v1.Node: %v", t.Obj)
return
}
default:
glog.Errorf("Cannot convert to *v1.Node: %v", t)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.deleteNode(node)
if err != nil {
glog.Errorf("Failed to delete node %s from cache: %v", node.Name, err)
return
}
return
}
func getJobID(pg *kbv1.PodGroup) kbapi.JobID {
return kbapi.JobID(fmt.Sprintf("%s/%s", pg.Namespace, pg.Name))
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) setPodGroup(ss *kbv1.PodGroup) error {
job := getJobID(ss)
if len(job) == 0 {
return fmt.Errorf("the identity of PodGroup is empty")
}
if _, found := sc.Jobs[job]; !found {
sc.Jobs[job] = kbapi.NewJobInfo(job)
}
sc.Jobs[job].SetPodGroup(ss)
// TODO(k82cn): set default queue in admission.
if len(ss.Spec.Queue) == 0 {
sc.Jobs[job].Queue = kbapi.QueueID(sc.defaultQueue)
}
return nil
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) updatePodGroup(oldPodGroup, newPodGroup *kbv1.PodGroup) error {
return sc.setPodGroup(newPodGroup)
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) deletePodGroup(ss *kbv1.PodGroup) error {
jobID := getJobID(ss)
job, found := sc.Jobs[jobID]
if !found {
return fmt.Errorf("can not found job %v:%v/%v", jobID, ss.Namespace, ss.Name)
}
// Unset SchedulingSpec
job.UnsetPodGroup()
sc.deleteJob(job)
return nil
}
// AddPodGroup adds a podgroup to the scheduler cache
func (sc *SchedulerCache) AddPodGroup(obj interface{}) {
ss, ok := obj.(*kbv1.PodGroup)
if !ok {
glog.Errorf("Cannot convert to *kbv1.PodGroup: %v", obj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
glog.V(4).Infof("Add PodGroup(%s) into cache, spec(%#v)", ss.Name, ss.Spec)
err := sc.setPodGroup(ss)
if err != nil {
glog.Errorf("Failed to add PodGroup %s into cache: %v", ss.Name, err)
return
}
return
}
// UpdatePodGroup updates a podgroup in the scheduler cache
func (sc *SchedulerCache) UpdatePodGroup(oldObj, newObj interface{}) {
oldSS, ok := oldObj.(*kbv1.PodGroup)
if !ok {
glog.Errorf("Cannot convert oldObj to *kbv1.SchedulingSpec: %v", oldObj)
return
}
newSS, ok := newObj.(*kbv1.PodGroup)
if !ok {
glog.Errorf("Cannot convert newObj to *kbv1.SchedulingSpec: %v", newObj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.updatePodGroup(oldSS, newSS)
if err != nil {
glog.Errorf("Failed to update SchedulingSpec %s into cache: %v", oldSS.Name, err)
return
}
return
}
// DeletePodGroup deletes a podgroup from the scheduler cache
func (sc *SchedulerCache) DeletePodGroup(obj interface{}) {
var ss *kbv1.PodGroup
switch t := obj.(type) {
case *kbv1.PodGroup:
ss = t
case cache.DeletedFinalStateUnknown:
var ok bool
ss, ok = t.Obj.(*kbv1.PodGroup)
if !ok {
glog.Errorf("Cannot convert to *kbv1.SchedulingSpec: %v", t.Obj)
return
}
default:
glog.Errorf("Cannot convert to *kbv1.SchedulingSpec: %v", t)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.deletePodGroup(ss)
if err != nil {
glog.Errorf("Failed to delete SchedulingSpec %s from cache: %v", ss.Name, err)
return
}
return
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) setPDB(pdb *policyv1.PodDisruptionBudget) error {
job := kbapi.JobID(utils.GetController(pdb))
if len(job) == 0 {
return fmt.Errorf("the controller of PodDisruptionBudget is empty")
}
if _, found := sc.Jobs[job]; !found {
sc.Jobs[job] = kbapi.NewJobInfo(job)
}
sc.Jobs[job].SetPDB(pdb)
// Set it to the default queue, as PDB does not support queue right now.
sc.Jobs[job].Queue = kbapi.QueueID(sc.defaultQueue)
return nil
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) updatePDB(oldPDB, newPDB *policyv1.PodDisruptionBudget) error {
return sc.setPDB(newPDB)
}
// Assumes that lock is already acquired.
func (sc *SchedulerCache) deletePDB(pdb *policyv1.PodDisruptionBudget) error {
jobID := kbapi.JobID(utils.GetController(pdb))
job, found := sc.Jobs[jobID]
if !found {
return fmt.Errorf("can not found job %v:%v/%v", jobID, pdb.Namespace, pdb.Name)
}
// Unset SchedulingSpec
job.UnsetPDB()
sc.deleteJob(job)
return nil
}
// AddPDB adds a pdb to the scheduler cache
func (sc *SchedulerCache) AddPDB(obj interface{}) {
pdb, ok := obj.(*policyv1.PodDisruptionBudget)
if !ok {
glog.Errorf("Cannot convert to *policyv1.PodDisruptionBudget: %v", obj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.setPDB(pdb)
if err != nil {
glog.Errorf("Failed to add PodDisruptionBudget %s into cache: %v", pdb.Name, err)
return
}
return
}
// UpdatePDB updates a pdb in the scheduler cache
func (sc *SchedulerCache) UpdatePDB(oldObj, newObj interface{}) {
oldPDB, ok := oldObj.(*policyv1.PodDisruptionBudget)
if !ok {
glog.Errorf("Cannot convert oldObj to *policyv1.PodDisruptionBudget: %v", oldObj)
return
}
newPDB, ok := newObj.(*policyv1.PodDisruptionBudget)
if !ok {
glog.Errorf("Cannot convert newObj to *policyv1.PodDisruptionBudget: %v", newObj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.updatePDB(oldPDB, newPDB)
if err != nil {
glog.Errorf("Failed to update PodDisruptionBudget %s into cache: %v", oldPDB.Name, err)
return
}
return
}
// DeletePDB deletes a pdb from the scheduler cache
func (sc *SchedulerCache) DeletePDB(obj interface{}) {
var pdb *policyv1.PodDisruptionBudget
switch t := obj.(type) {
case *policyv1.PodDisruptionBudget:
pdb = t
case cache.DeletedFinalStateUnknown:
var ok bool
pdb, ok = t.Obj.(*policyv1.PodDisruptionBudget)
if !ok {
glog.Errorf("Cannot convert to *policyv1.PodDisruptionBudget: %v", t.Obj)
return
}
default:
glog.Errorf("Cannot convert to *policyv1.PodDisruptionBudget: %v", t)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.deletePDB(pdb)
if err != nil {
glog.Errorf("Failed to delete PodDisruptionBudget %s from cache: %v", pdb.Name, err)
return
}
return
}
// AddQueue adds a queue to the scheduler cache
func (sc *SchedulerCache) AddQueue(obj interface{}) {
ss, ok := obj.(*kbv1.Queue)
if !ok {
glog.Errorf("Cannot convert to *kbv1.Queue: %v", obj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
glog.V(4).Infof("Add Queue(%s) into cache, spec(%#v)", ss.Name, ss.Spec)
err := sc.addQueue(ss)
if err != nil {
glog.Errorf("Failed to add Queue %s into cache: %v", ss.Name, err)
return
}
return
}
// UpdateQueue updates a queue in the scheduler cache
func (sc *SchedulerCache) UpdateQueue(oldObj, newObj interface{}) {
oldSS, ok := oldObj.(*kbv1.Queue)
if !ok {
glog.Errorf("Cannot convert oldObj to *kbv1.Queue: %v", oldObj)
return
}
newSS, ok := newObj.(*kbv1.Queue)
if !ok {
glog.Errorf("Cannot convert newObj to *kbv1.Queue: %v", newObj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.updateQueue(oldSS, newSS)
if err != nil {
glog.Errorf("Failed to update Queue %s into cache: %v", oldSS.Name, err)
return
}
return
}
// DeleteQueue deletes a queue from the scheduler cache
func (sc *SchedulerCache) DeleteQueue(obj interface{}) {
var ss *kbv1.Queue
switch t := obj.(type) {
case *kbv1.Queue:
ss = t
case cache.DeletedFinalStateUnknown:
var ok bool
ss, ok = t.Obj.(*kbv1.Queue)
if !ok {
glog.Errorf("Cannot convert to *kbv1.Queue: %v", t.Obj)
return
}
default:
glog.Errorf("Cannot convert to *kbv1.Queue: %v", t)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
err := sc.deleteQueue(ss)
if err != nil {
glog.Errorf("Failed to delete Queue %s from cache: %v", ss.Name, err)
return
}
return
}
func (sc *SchedulerCache) addQueue(queue *kbv1.Queue) error {
qi := kbapi.NewQueueInfo(queue)
sc.Queues[qi.UID] = qi
return nil
}
func (sc *SchedulerCache) updateQueue(oldObj, newObj *kbv1.Queue) error {
sc.deleteQueue(oldObj)
sc.addQueue(newObj)
return nil
}
func (sc *SchedulerCache) deleteQueue(queue *kbv1.Queue) error {
qi := kbapi.NewQueueInfo(queue)
delete(sc.Queues, qi.UID)
return nil
}
// DeletePriorityClass deletes a priorityclass from the scheduler cache
func (sc *SchedulerCache) DeletePriorityClass(obj interface{}) {
var ss *v1beta1.PriorityClass
switch t := obj.(type) {
case *v1beta1.PriorityClass:
ss = t
case cache.DeletedFinalStateUnknown:
var ok bool
ss, ok = t.Obj.(*v1beta1.PriorityClass)
if !ok {
glog.Errorf("Cannot convert to *v1beta1.PriorityClass: %v", t.Obj)
return
}
default:
glog.Errorf("Cannot convert to *v1beta1.PriorityClass: %v", t)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
sc.deletePriorityClass(ss)
}
// UpdatePriorityClass updates a priorityclass in the scheduler cache
func (sc *SchedulerCache) UpdatePriorityClass(oldObj, newObj interface{}) {
oldSS, ok := oldObj.(*v1beta1.PriorityClass)
if !ok {
glog.Errorf("Cannot convert oldObj to *v1beta1.PriorityClass: %v", oldObj)
return
}
newSS, ok := newObj.(*v1beta1.PriorityClass)
if !ok {
glog.Errorf("Cannot convert newObj to *v1beta1.PriorityClass: %v", newObj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
sc.deletePriorityClass(oldSS)
sc.addPriorityClass(newSS)
}
// AddPriorityClass adds a priorityclass to the scheduler cache
func (sc *SchedulerCache) AddPriorityClass(obj interface{}) {
var ss *v1beta1.PriorityClass
switch t := obj.(type) {
case *v1beta1.PriorityClass:
ss = t
case cache.DeletedFinalStateUnknown:
var ok bool
ss, ok = t.Obj.(*v1beta1.PriorityClass)
if !ok {
glog.Errorf("Cannot convert to *v1beta1.PriorityClass: %v", t.Obj)
return
}
default:
glog.Errorf("Cannot convert to *v1beta1.PriorityClass: %v", t)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
sc.addPriorityClass(ss)
}
func (sc *SchedulerCache) deletePriorityClass(pc *v1beta1.PriorityClass) {
if pc.GlobalDefault {
sc.defaultPriorityClass = nil
sc.defaultPriority = 0
}
delete(sc.PriorityClasses, pc.Name)
}
func (sc *SchedulerCache) addPriorityClass(pc *v1beta1.PriorityClass) {
if pc.GlobalDefault {
if sc.defaultPriorityClass != nil {
glog.Errorf("Updated default priority class from <%s> to <%s> forcefully.",
sc.defaultPriorityClass.Name, pc.Name)
}
sc.defaultPriorityClass = pc
sc.defaultPriority = pc.Value
}
sc.PriorityClasses[pc.Name] = pc
}

View File

@ -0,0 +1,78 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
v1 "k8s.io/api/core/v1"
)
// Cache collects pods/nodes/queues information
// and provides information snapshot
type Cache interface {
// Run starts the informers
Run(stopCh <-chan struct{})
// Snapshot deep-copies the overall cache information into a snapshot
Snapshot() *api.ClusterInfo
// WaitForCacheSync waits for all caches to be synced
WaitForCacheSync(stopCh <-chan struct{}) bool
// Bind binds Task to the target host.
// TODO(jinzhej): clean up expired Tasks.
Bind(task *api.TaskInfo, hostname string) error
// Evict evicts the task to release resources.
Evict(task *api.TaskInfo, reason string) error
// RecordJobStatusEvent records related events according to job status.
// Deprecated: remove it after removed PDB support.
RecordJobStatusEvent(job *api.JobInfo)
// UpdateJobStatus puts job in backlog for a while.
UpdateJobStatus(job *api.JobInfo) (*api.JobInfo, error)
// AllocateVolumes allocates volume on the host to the task
AllocateVolumes(task *api.TaskInfo, hostname string) error
// BindVolumes binds volumes to the task
BindVolumes(task *api.TaskInfo) error
}
// VolumeBinder is the interface for allocating and binding volumes
type VolumeBinder interface {
AllocateVolumes(task *api.TaskInfo, hostname string) error
BindVolumes(task *api.TaskInfo) error
}
// Binder is the interface for binding a task to a hostname
type Binder interface {
Bind(task *v1.Pod, hostname string) error
}
// Evictor is the interface for evicting pods
type Evictor interface {
Evict(pod *v1.Pod) error
}
// StatusUpdater updates pod with given PodCondition
type StatusUpdater interface {
UpdatePodCondition(pod *v1.Pod, podCondition *v1.PodCondition) (*v1.Pod, error)
UpdatePodGroup(pg *v1alpha1.PodGroup) (*v1alpha1.PodGroup, error)
}
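// Illustrative sketch (editorial, not part of the original file): a scheduling
// cycle typically consumes Cache as below; cache start-up and error handling
// are elided, and runOnce is a hypothetical helper name.
//
//  func runOnce(c Cache) {
//      snapshot := c.Snapshot()
//      for _, job := range snapshot.Jobs {
//          // pick tasks, then c.Bind(task, hostname) or c.Evict(task, reason)
//      }
//  }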

View File

@ -0,0 +1,60 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/utils"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
const (
shadowPodGroupKey = "volcano/shadow-pod-group"
)
func shadowPodGroup(pg *v1alpha1.PodGroup) bool {
if pg == nil {
return true
}
_, found := pg.Annotations[shadowPodGroupKey]
return found
}
func createShadowPodGroup(pod *v1.Pod) *v1alpha1.PodGroup {
jobID := api.JobID(utils.GetController(pod))
if len(jobID) == 0 {
jobID = api.JobID(pod.UID)
}
return &v1alpha1.PodGroup{
ObjectMeta: metav1.ObjectMeta{
Namespace: pod.Namespace,
Name: string(jobID),
Annotations: map[string]string{
shadowPodGroupKey: string(jobID),
},
},
Spec: v1alpha1.PodGroupSpec{
MinMember: 1,
},
}
}
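// Note (editorial): a bare pod whose SchedulerName matches this scheduler is
// wrapped this way so the gang-scheduling logic can treat it as a
// single-member job (MinMember: 1); the shadowPodGroupKey annotation is what
// shadowPodGroup checks for.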

View File

@ -0,0 +1,56 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conf
// SchedulerConfiguration defines the configuration of scheduler.
type SchedulerConfiguration struct {
// Actions defines the actions list of scheduler in order
Actions string `yaml:"actions"`
// Tiers defines plugins in different tiers
Tiers []Tier `yaml:"tiers"`
}
// Tier defines plugin tier
type Tier struct {
Plugins []PluginOption `yaml:"plugins"`
}
// PluginOption defines the options of plugin
type PluginOption struct {
// The name of Plugin
Name string `yaml:"name"`
// EnabledJobOrder defines whether jobOrderFn is enabled
EnabledJobOrder *bool `yaml:"enableJobOrder"`
// EnabledJobReady defines whether jobReadyFn is enabled
EnabledJobReady *bool `yaml:"enableJobReady"`
// EnabledJobPipelined defines whether jobPipelinedFn is enabled
EnabledJobPipelined *bool `yaml:"enableJobPipelined"`
// EnabledTaskOrder defines whether taskOrderFn is enabled
EnabledTaskOrder *bool `yaml:"enableTaskOrder"`
// EnabledPreemptable defines whether preemptableFn is enabled
EnabledPreemptable *bool `yaml:"enablePreemptable"`
// EnabledReclaimable defines whether reclaimableFn is enabled
EnabledReclaimable *bool `yaml:"enableReclaimable"`
// EnabledQueueOrder defines whether queueOrderFn is enabled
EnabledQueueOrder *bool `yaml:"enableQueueOrder"`
// EnabledPredicate defines whether predicateFn is enabled
EnabledPredicate *bool `yaml:"enablePredicate"`
// EnabledNodeOrder defines whether NodeOrderFn is enabled
EnabledNodeOrder *bool `yaml:"enableNodeOrder"`
// Arguments defines the different arguments that can be given to different plugins
Arguments map[string]string `yaml:"arguments"`
}
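// Illustrative sketch (an assumption, not shipped in this commit): a YAML
// configuration unmarshalled into SchedulerConfiguration might look like:
//
//  actions: "allocate, backfill"
//  tiers:
//  - plugins:
//    - name: priority
//    - name: gang
//  - plugins:
//    - name: drf
//      enablePreemptable: false
//    - name: proportion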

View File

@ -0,0 +1,46 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"strconv"
"github.com/golang/glog"
)
// Arguments is a map of plugin arguments
type Arguments map[string]string
// GetInt gets the integer value of the given key from the arguments
func (a Arguments) GetInt(ptr *int, key string) {
if ptr == nil {
return
}
argv, ok := a[key]
if !ok || argv == "" {
return
}
value, err := strconv.Atoi(argv)
if err != nil {
glog.Warningf("Could not parse argument: %s for key %s, with err %v", argv, key, err)
return
}
*ptr = value
}
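// Illustrative usage (hypothetical key name): read a plugin argument while
// keeping a default when the key is absent or unparsable.
//
//  weight := 1 // default kept on missing or malformed value
//  arguments.GetInt(&weight, "priority.weight")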

View File

@ -0,0 +1,32 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
// Event structure
type Event struct {
Task *api.TaskInfo
}
// EventHandler structure
type EventHandler struct {
AllocateFunc func(event *Event)
DeallocateFunc func(event *Event)
}
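// Illustrative sketch (editorial): a plugin can track resource movement within
// a session by registering handlers in OnSessionOpen; Event currently carries
// only the affected Task.
//
//  ssn.AddEventHandler(&EventHandler{
//      AllocateFunc:   func(e *Event) { /* account for e.Task */ },
//      DeallocateFunc: func(e *Event) { /* release e.Task */ },
//  })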

View File

@ -0,0 +1,63 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"time"
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics"
)
// OpenSession starts a session
func OpenSession(cache cache.Cache, tiers []conf.Tier) *Session {
ssn := openSession(cache)
ssn.Tiers = tiers
for _, tier := range tiers {
for _, plugin := range tier.Plugins {
if pb, found := GetPluginBuilder(plugin.Name); !found {
glog.Errorf("Failed to get plugin %s.", plugin.Name)
} else {
plugin := pb(plugin.Arguments)
ssn.plugins[plugin.Name()] = plugin
}
}
}
for _, plugin := range ssn.plugins {
onSessionOpenStart := time.Now()
plugin.OnSessionOpen(ssn)
metrics.UpdatePluginDuration(plugin.Name(), metrics.OnSessionOpen, metrics.Duration(onSessionOpenStart))
}
return ssn
}
// CloseSession closes the session
func CloseSession(ssn *Session) {
for _, plugin := range ssn.plugins {
onSessionCloseStart := time.Now()
plugin.OnSessionClose(ssn)
metrics.UpdatePluginDuration(plugin.Name(), metrics.OnSessionClose, metrics.Duration(onSessionCloseStart))
}
closeSession(ssn)
}
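// Illustrative sketch (editorial) of one scheduling cycle built on these
// helpers; the action list comes from configuration and error handling is
// elided:
//
//  ssn := OpenSession(cache, tiers)
//  defer CloseSession(ssn)
//  for _, action := range actions {
//      action.Execute(ssn)
//  }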

View File

@ -0,0 +1,41 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
// Action is the interface of scheduler action.
type Action interface {
// The unique name of Action.
Name() string
// Initialize initializes the allocator plugins.
Initialize()
// Execute allocates the cluster's resources into each queue.
Execute(ssn *Session)
// UnInitialize un-initializes the allocator plugins.
UnInitialize()
}
// Plugin is the interface of scheduler plugin
type Plugin interface {
// The unique name of Plugin.
Name() string
OnSessionOpen(ssn *Session)
OnSessionClose(ssn *Session)
}
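// Illustrative minimal Plugin implementation (a sketch, not shipped here):
//
//  type noopPlugin struct{}
//
//  func (noopPlugin) Name() string            { return "noop" }
//  func (noopPlugin) OnSessionOpen(*Session)  {}
//  func (noopPlugin) OnSessionClose(*Session) {}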

View File

@ -0,0 +1,72 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import "sync"
var pluginMutex sync.Mutex
// PluginBuilder builds a Plugin from the given Arguments
type PluginBuilder func(Arguments) Plugin
// Plugin management
var pluginBuilders = map[string]PluginBuilder{}
// RegisterPluginBuilder registers the plugin builder
func RegisterPluginBuilder(name string, pc PluginBuilder) {
pluginMutex.Lock()
defer pluginMutex.Unlock()
pluginBuilders[name] = pc
}
// CleanupPluginBuilders cleans up all the plugin builders
func CleanupPluginBuilders() {
pluginMutex.Lock()
defer pluginMutex.Unlock()
pluginBuilders = map[string]PluginBuilder{}
}
// GetPluginBuilder gets the plugin builder by name
func GetPluginBuilder(name string) (PluginBuilder, bool) {
pluginMutex.Lock()
defer pluginMutex.Unlock()
pb, found := pluginBuilders[name]
return pb, found
}
// Action management
var actionMap = map[string]Action{}
// RegisterAction registers an action
func RegisterAction(act Action) {
pluginMutex.Lock()
defer pluginMutex.Unlock()
actionMap[act.Name()] = act
}
// GetAction gets an action by name
func GetAction(name string) (Action, bool) {
pluginMutex.Lock()
defer pluginMutex.Unlock()
act, found := actionMap[name]
return act, found
}
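// Illustrative sketch (editorial): a plugin package usually registers its
// builder at init time so the scheduler can enable it by name in the
// configuration; "gang" and New are placeholders for a real plugin package.
//
//  func init() {
//      framework.RegisterPluginBuilder("gang", New)
//  }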

View File

@ -0,0 +1,404 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"github.com/golang/glog"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics"
)
// Session holds the information of the current scheduling cycle
type Session struct {
UID types.UID
cache cache.Cache
Jobs map[api.JobID]*api.JobInfo
Nodes map[string]*api.NodeInfo
Queues map[api.QueueID]*api.QueueInfo
Backlog []*api.JobInfo
Tiers []conf.Tier
plugins map[string]Plugin
eventHandlers []*EventHandler
jobOrderFns map[string]api.CompareFn
queueOrderFns map[string]api.CompareFn
taskOrderFns map[string]api.CompareFn
predicateFns map[string]api.PredicateFn
nodeOrderFns map[string]api.NodeOrderFn
preemptableFns map[string]api.EvictableFn
reclaimableFns map[string]api.EvictableFn
overusedFns map[string]api.ValidateFn
jobReadyFns map[string]api.ValidateFn
jobPipelinedFns map[string]api.ValidateFn
jobValidFns map[string]api.ValidateExFn
}
func openSession(cache cache.Cache) *Session {
ssn := &Session{
UID: uuid.NewUUID(),
cache: cache,
Jobs: map[api.JobID]*api.JobInfo{},
Nodes: map[string]*api.NodeInfo{},
Queues: map[api.QueueID]*api.QueueInfo{},
plugins: map[string]Plugin{},
jobOrderFns: map[string]api.CompareFn{},
queueOrderFns: map[string]api.CompareFn{},
taskOrderFns: map[string]api.CompareFn{},
predicateFns: map[string]api.PredicateFn{},
nodeOrderFns: map[string]api.NodeOrderFn{},
preemptableFns: map[string]api.EvictableFn{},
reclaimableFns: map[string]api.EvictableFn{},
overusedFns: map[string]api.ValidateFn{},
jobReadyFns: map[string]api.ValidateFn{},
jobPipelinedFns: map[string]api.ValidateFn{},
jobValidFns: map[string]api.ValidateExFn{},
}
snapshot := cache.Snapshot()
ssn.Jobs = snapshot.Jobs
for _, job := range ssn.Jobs {
if vjr := ssn.JobValid(job); vjr != nil {
if !vjr.Pass {
jc := &v1alpha1.PodGroupCondition{
Type: v1alpha1.PodGroupUnschedulableType,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Now(),
TransitionID: string(ssn.UID),
Reason: vjr.Reason,
Message: vjr.Message,
}
if err := ssn.UpdateJobCondition(job, jc); err != nil {
glog.Errorf("Failed to update job condition: %v", err)
}
}
delete(ssn.Jobs, job.UID)
}
}
ssn.Nodes = snapshot.Nodes
ssn.Queues = snapshot.Queues
glog.V(3).Infof("Open Session %v with <%d> Job and <%d> Queues",
ssn.UID, len(ssn.Jobs), len(ssn.Queues))
return ssn
}
func closeSession(ssn *Session) {
for _, job := range ssn.Jobs {
// If job is using PDB, ignore it.
// TODO(k82cn): remove it when removing PDB support
if job.PodGroup == nil {
ssn.cache.RecordJobStatusEvent(job)
continue
}
job.PodGroup.Status = jobStatus(ssn, job)
if _, err := ssn.cache.UpdateJobStatus(job); err != nil {
glog.Errorf("Failed to update job <%s/%s>: %v",
job.Namespace, job.Name, err)
}
}
ssn.Jobs = nil
ssn.Nodes = nil
ssn.Backlog = nil
ssn.plugins = nil
ssn.eventHandlers = nil
ssn.jobOrderFns = nil
ssn.queueOrderFns = nil
glog.V(3).Infof("Close Session %v", ssn.UID)
}
func jobStatus(ssn *Session, jobInfo *api.JobInfo) v1alpha1.PodGroupStatus {
status := jobInfo.PodGroup.Status
unschedulable := false
for _, c := range status.Conditions {
if c.Type == v1alpha1.PodGroupUnschedulableType &&
c.Status == v1.ConditionTrue &&
c.TransitionID == string(ssn.UID) {
unschedulable = true
break
}
}
// If there are running tasks and the job is unschedulable, the phase is unknown
if len(jobInfo.TaskStatusIndex[api.Running]) != 0 && unschedulable {
status.Phase = v1alpha1.PodGroupUnknown
} else {
allocated := 0
for status, tasks := range jobInfo.TaskStatusIndex {
if api.AllocatedStatus(status) {
allocated += len(tasks)
}
}
// If enough tasks are allocated, the job is running
if int32(allocated) > jobInfo.PodGroup.Spec.MinMember {
status.Phase = v1alpha1.PodGroupRunning
} else if jobInfo.PodGroup.Status.Phase != v1alpha1.PodGroupInqueue {
status.Phase = v1alpha1.PodGroupPending
}
}
status.Running = int32(len(jobInfo.TaskStatusIndex[api.Running]))
status.Failed = int32(len(jobInfo.TaskStatusIndex[api.Failed]))
status.Succeeded = int32(len(jobInfo.TaskStatusIndex[api.Succeeded]))
return status
}
// Statement returns a new statement object
func (ssn *Session) Statement() *Statement {
return &Statement{
ssn: ssn,
}
}
// Pipeline the task to the node in the session
func (ssn *Session) Pipeline(task *api.TaskInfo, hostname string) error {
// Only update status in session
job, found := ssn.Jobs[task.Job]
if found {
if err := job.UpdateTaskStatus(task, api.Pipelined); err != nil {
glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v",
task.Namespace, task.Name, api.Pipelined, ssn.UID, err)
return err
}
} else {
glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.",
task.Job, ssn.UID)
return fmt.Errorf("failed to find job %s when binding", task.Job)
}
task.NodeName = hostname
if node, found := ssn.Nodes[hostname]; found {
if err := node.AddTask(task); err != nil {
glog.Errorf("Failed to add task <%v/%v> to node <%v> in Session <%v>: %v",
task.Namespace, task.Name, hostname, ssn.UID, err)
return err
}
glog.V(3).Infof("After added Task <%v/%v> to Node <%v>: idle <%v>, used <%v>, releasing <%v>",
task.Namespace, task.Name, node.Name, node.Idle, node.Used, node.Releasing)
} else {
glog.Errorf("Failed to found Node <%s> in Session <%s> index when binding.",
hostname, ssn.UID)
return fmt.Errorf("failed to find node %s", hostname)
}
for _, eh := range ssn.eventHandlers {
if eh.AllocateFunc != nil {
eh.AllocateFunc(&Event{
Task: task,
})
}
}
return nil
}
// Allocate the task to the node in the session
func (ssn *Session) Allocate(task *api.TaskInfo, hostname string) error {
if err := ssn.cache.AllocateVolumes(task, hostname); err != nil {
return err
}
// Only update status in session
job, found := ssn.Jobs[task.Job]
if found {
if err := job.UpdateTaskStatus(task, api.Allocated); err != nil {
glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v",
task.Namespace, task.Name, api.Allocated, ssn.UID, err)
return err
}
} else {
glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.",
task.Job, ssn.UID)
return fmt.Errorf("failed to find job %s", task.Job)
}
task.NodeName = hostname
if node, found := ssn.Nodes[hostname]; found {
if err := node.AddTask(task); err != nil {
glog.Errorf("Failed to add task <%v/%v> to node <%v> in Session <%v>: %v",
task.Namespace, task.Name, hostname, ssn.UID, err)
return err
}
glog.V(3).Infof("After allocated Task <%v/%v> to Node <%v>: idle <%v>, used <%v>, releasing <%v>",
task.Namespace, task.Name, node.Name, node.Idle, node.Used, node.Releasing)
} else {
glog.Errorf("Failed to found Node <%s> in Session <%s> index when binding.",
hostname, ssn.UID)
return fmt.Errorf("failed to find node %s", hostname)
}
// Callbacks
for _, eh := range ssn.eventHandlers {
if eh.AllocateFunc != nil {
eh.AllocateFunc(&Event{
Task: task,
})
}
}
if ssn.JobReady(job) {
for _, task := range job.TaskStatusIndex[api.Allocated] {
if err := ssn.dispatch(task); err != nil {
glog.Errorf("Failed to dispatch task <%v/%v>: %v",
task.Namespace, task.Name, err)
return err
}
}
}
return nil
}
func (ssn *Session) dispatch(task *api.TaskInfo) error {
if err := ssn.cache.BindVolumes(task); err != nil {
return err
}
if err := ssn.cache.Bind(task, task.NodeName); err != nil {
return err
}
// Update status in session
if job, found := ssn.Jobs[task.Job]; found {
if err := job.UpdateTaskStatus(task, api.Binding); err != nil {
glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v",
task.Namespace, task.Name, api.Binding, ssn.UID, err)
return err
}
} else {
glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.",
task.Job, ssn.UID)
return fmt.Errorf("failed to find job %s", task.Job)
}
metrics.UpdateTaskScheduleDuration(metrics.Duration(task.Pod.CreationTimestamp.Time))
return nil
}
// Evict the task in the session
func (ssn *Session) Evict(reclaimee *api.TaskInfo, reason string) error {
if err := ssn.cache.Evict(reclaimee, reason); err != nil {
return err
}
// Update status in session
job, found := ssn.Jobs[reclaimee.Job]
if found {
if err := job.UpdateTaskStatus(reclaimee, api.Releasing); err != nil {
glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v",
reclaimee.Namespace, reclaimee.Name, api.Releasing, ssn.UID, err)
return err
}
} else {
glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.",
reclaimee.Job, ssn.UID)
return fmt.Errorf("failed to find job %s", reclaimee.Job)
}
// Update task in node.
if node, found := ssn.Nodes[reclaimee.NodeName]; found {
if err := node.UpdateTask(reclaimee); err != nil {
glog.Errorf("Failed to update task <%v/%v> in Session <%v>: %v",
reclaimee.Namespace, reclaimee.Name, ssn.UID, err)
return err
}
}
for _, eh := range ssn.eventHandlers {
if eh.DeallocateFunc != nil {
eh.DeallocateFunc(&Event{
Task: reclaimee,
})
}
}
return nil
}
// UpdateJobCondition updates the job condition accordingly.
func (ssn *Session) UpdateJobCondition(jobInfo *api.JobInfo, cond *v1alpha1.PodGroupCondition) error {
job, ok := ssn.Jobs[jobInfo.UID]
if !ok {
return fmt.Errorf("failed to find job <%s/%s>", jobInfo.Namespace, jobInfo.Name)
}
index := -1
for i, c := range job.PodGroup.Status.Conditions {
if c.Type == cond.Type {
index = i
break
}
}
// Update condition to the new condition.
if index < 0 {
job.PodGroup.Status.Conditions = append(job.PodGroup.Status.Conditions, *cond)
} else {
job.PodGroup.Status.Conditions[index] = *cond
}
return nil
}
// AddEventHandler adds event handlers
func (ssn *Session) AddEventHandler(eh *EventHandler) {
ssn.eventHandlers = append(ssn.eventHandlers, eh)
}
// String returns nodes and jobs information in the session
func (ssn Session) String() string {
msg := fmt.Sprintf("Session %v: \n", ssn.UID)
for _, job := range ssn.Jobs {
msg = fmt.Sprintf("%s%v\n", msg, job)
}
for _, node := range ssn.Nodes {
msg = fmt.Sprintf("%s%v\n", msg, node)
}
return msg
}

View File

@ -0,0 +1,377 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
// AddJobOrderFn adds a job order function
func (ssn *Session) AddJobOrderFn(name string, cf api.CompareFn) {
ssn.jobOrderFns[name] = cf
}
// AddQueueOrderFn adds a queue order function
func (ssn *Session) AddQueueOrderFn(name string, qf api.CompareFn) {
ssn.queueOrderFns[name] = qf
}
// AddTaskOrderFn adds a task order function
func (ssn *Session) AddTaskOrderFn(name string, cf api.CompareFn) {
ssn.taskOrderFns[name] = cf
}
// AddPreemptableFn adds a preemptable function
func (ssn *Session) AddPreemptableFn(name string, cf api.EvictableFn) {
ssn.preemptableFns[name] = cf
}
// AddReclaimableFn adds a reclaimable function
func (ssn *Session) AddReclaimableFn(name string, rf api.EvictableFn) {
ssn.reclaimableFns[name] = rf
}
// AddJobReadyFn adds a job ready function
func (ssn *Session) AddJobReadyFn(name string, vf api.ValidateFn) {
ssn.jobReadyFns[name] = vf
}
// AddJobPipelinedFn adds a job pipelined function
func (ssn *Session) AddJobPipelinedFn(name string, vf api.ValidateFn) {
ssn.jobPipelinedFns[name] = vf
}
// AddPredicateFn adds a predicate function
func (ssn *Session) AddPredicateFn(name string, pf api.PredicateFn) {
ssn.predicateFns[name] = pf
}
// AddNodeOrderFn adds a node order function
func (ssn *Session) AddNodeOrderFn(name string, pf api.NodeOrderFn) {
ssn.nodeOrderFns[name] = pf
}
// AddOverusedFn adds an overused function
func (ssn *Session) AddOverusedFn(name string, fn api.ValidateFn) {
ssn.overusedFns[name] = fn
}
// AddJobValidFn adds a job valid function
func (ssn *Session) AddJobValidFn(name string, fn api.ValidateExFn) {
ssn.jobValidFns[name] = fn
}
// Reclaimable invokes the reclaimable functions of the plugins
func (ssn *Session) Reclaimable(reclaimer *api.TaskInfo, reclaimees []*api.TaskInfo) []*api.TaskInfo {
var victims []*api.TaskInfo
var init bool
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledReclaimable) {
continue
}
rf, found := ssn.reclaimableFns[plugin.Name]
if !found {
continue
}
candidates := rf(reclaimer, reclaimees)
if !init {
victims = candidates
init = true
} else {
var intersection []*api.TaskInfo
// Get intersection of victims and candidates.
for _, v := range victims {
for _, c := range candidates {
if v.UID == c.UID {
intersection = append(intersection, v)
}
}
}
// Update victims to intersection
victims = intersection
}
}
// Plugins in this tier made a decision if victims is not nil
if victims != nil {
return victims
}
}
return victims
}
// Preemptable invokes the preemptable functions of the plugins
func (ssn *Session) Preemptable(preemptor *api.TaskInfo, preemptees []*api.TaskInfo) []*api.TaskInfo {
var victims []*api.TaskInfo
var init bool
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledPreemptable) {
continue
}
pf, found := ssn.preemptableFns[plugin.Name]
if !found {
continue
}
candidates := pf(preemptor, preemptees)
if !init {
victims = candidates
init = true
} else {
var intersection []*api.TaskInfo
// Get intersection of victims and candidates.
for _, v := range victims {
for _, c := range candidates {
if v.UID == c.UID {
intersection = append(intersection, v)
}
}
}
// Update victims to intersection
victims = intersection
}
}
// Plugins in this tier made a decision if victims is not nil
if victims != nil {
return victims
}
}
return victims
}
// Overused invokes the overused functions of the plugins
func (ssn *Session) Overused(queue *api.QueueInfo) bool {
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
of, found := ssn.overusedFns[plugin.Name]
if !found {
continue
}
if of(queue) {
return true
}
}
}
return false
}
// JobReady invokes the job ready functions of the plugins
func (ssn *Session) JobReady(obj interface{}) bool {
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledJobReady) {
continue
}
jrf, found := ssn.jobReadyFns[plugin.Name]
if !found {
continue
}
if !jrf(obj) {
return false
}
}
}
return true
}
// JobPipelined invokes the job pipelined functions of the plugins
func (ssn *Session) JobPipelined(obj interface{}) bool {
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledJobPipelined) {
continue
}
jrf, found := ssn.jobPipelinedFns[plugin.Name]
if !found {
continue
}
if !jrf(obj) {
return false
}
}
}
return true
}
// JobValid invokes the job valid functions of the plugins
func (ssn *Session) JobValid(obj interface{}) *api.ValidateResult {
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
jrf, found := ssn.jobValidFns[plugin.Name]
if !found {
continue
}
if vr := jrf(obj); vr != nil && !vr.Pass {
return vr
}
}
}
return nil
}
// JobOrderFn invokes the job order functions of the plugins
func (ssn *Session) JobOrderFn(l, r interface{}) bool {
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledJobOrder) {
continue
}
jof, found := ssn.jobOrderFns[plugin.Name]
if !found {
continue
}
if j := jof(l, r); j != 0 {
return j < 0
}
}
}
// If no job order funcs, order job by CreationTimestamp first, then by UID.
lv := l.(*api.JobInfo)
rv := r.(*api.JobInfo)
if lv.CreationTimestamp.Equal(&rv.CreationTimestamp) {
return lv.UID < rv.UID
}
return lv.CreationTimestamp.Before(&rv.CreationTimestamp)
}
// QueueOrderFn invokes the queue order functions of the plugins
func (ssn *Session) QueueOrderFn(l, r interface{}) bool {
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledQueueOrder) {
continue
}
qof, found := ssn.queueOrderFns[plugin.Name]
if !found {
continue
}
if j := qof(l, r); j != 0 {
return j < 0
}
}
}
// If no queue order funcs, order queue by CreationTimestamp first, then by UID.
lv := l.(*api.QueueInfo)
rv := r.(*api.QueueInfo)
if lv.Queue.CreationTimestamp.Equal(&rv.Queue.CreationTimestamp) {
return lv.UID < rv.UID
}
return lv.Queue.CreationTimestamp.Before(&rv.Queue.CreationTimestamp)
}
// TaskCompareFns invokes the task order functions of the plugins
func (ssn *Session) TaskCompareFns(l, r interface{}) int {
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledTaskOrder) {
continue
}
tof, found := ssn.taskOrderFns[plugin.Name]
if !found {
continue
}
if j := tof(l, r); j != 0 {
return j
}
}
}
return 0
}
// TaskOrderFn invokes the task order functions of the plugins
func (ssn *Session) TaskOrderFn(l, r interface{}) bool {
if res := ssn.TaskCompareFns(l, r); res != 0 {
return res < 0
}
// If no task order funcs, order task by CreationTimestamp first, then by UID.
lv := l.(*api.TaskInfo)
rv := r.(*api.TaskInfo)
if lv.Pod.CreationTimestamp.Equal(&rv.Pod.CreationTimestamp) {
return lv.UID < rv.UID
}
return lv.Pod.CreationTimestamp.Before(&rv.Pod.CreationTimestamp)
}
// PredicateFn invokes the predicate functions of the plugins
func (ssn *Session) PredicateFn(task *api.TaskInfo, node *api.NodeInfo) error {
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledPredicate) {
continue
}
pfn, found := ssn.predicateFns[plugin.Name]
if !found {
continue
}
err := pfn(task, node)
if err != nil {
return err
}
}
}
return nil
}
// NodeOrderFn invokes the node order functions of the plugins
func (ssn *Session) NodeOrderFn(task *api.TaskInfo, node *api.NodeInfo) (float64, error) {
priorityScore := 0.0
for _, tier := range ssn.Tiers {
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledNodeOrder) {
continue
}
pfn, found := ssn.nodeOrderFns[plugin.Name]
if !found {
continue
}
score, err := pfn(task, node)
if err != nil {
return 0, err
}
priorityScore = priorityScore + score
}
}
return priorityScore, nil
}
func isEnabled(enabled *bool) bool {
return enabled != nil && *enabled
}

View File

@ -0,0 +1,222 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
// Statement structure
type Statement struct {
operations []operation
ssn *Session
}
type operation struct {
name string
args []interface{}
}
// Evict the pod
func (s *Statement) Evict(reclaimee *api.TaskInfo, reason string) error {
// Update status in session
job, found := s.ssn.Jobs[reclaimee.Job]
if found {
if err := job.UpdateTaskStatus(reclaimee, api.Releasing); err != nil {
glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v",
reclaimee.Namespace, reclaimee.Name, api.Releasing, s.ssn.UID, err)
}
} else {
glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.",
reclaimee.Job, s.ssn.UID)
}
// Update task in node.
if node, found := s.ssn.Nodes[reclaimee.NodeName]; found {
node.UpdateTask(reclaimee)
}
for _, eh := range s.ssn.eventHandlers {
if eh.DeallocateFunc != nil {
eh.DeallocateFunc(&Event{
Task: reclaimee,
})
}
}
s.operations = append(s.operations, operation{
name: "evict",
args: []interface{}{reclaimee, reason},
})
return nil
}
func (s *Statement) evict(reclaimee *api.TaskInfo, reason string) error {
if err := s.ssn.cache.Evict(reclaimee, reason); err != nil {
if e := s.unevict(reclaimee, reason); e != nil {
glog.Errorf("Failed to unevict task <%v/%v>: %v.",
reclaimee.Namespace, reclaimee.Name, e)
}
return err
}
return nil
}
func (s *Statement) unevict(reclaimee *api.TaskInfo, reason string) error {
// Update status in session
job, found := s.ssn.Jobs[reclaimee.Job]
if found {
if err := job.UpdateTaskStatus(reclaimee, api.Running); err != nil {
glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v",
reclaimee.Namespace, reclaimee.Name, api.Running, s.ssn.UID, err)
}
} else {
glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.",
reclaimee.Job, s.ssn.UID)
}
// Update task in node.
if node, found := s.ssn.Nodes[reclaimee.NodeName]; found {
node.AddTask(reclaimee)
}
for _, eh := range s.ssn.eventHandlers {
if eh.AllocateFunc != nil {
eh.AllocateFunc(&Event{
Task: reclaimee,
})
}
}
return nil
}
// Pipeline the task for the node
func (s *Statement) Pipeline(task *api.TaskInfo, hostname string) error {
// Only update status in session
job, found := s.ssn.Jobs[task.Job]
if found {
if err := job.UpdateTaskStatus(task, api.Pipelined); err != nil {
glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v",
task.Namespace, task.Name, api.Pipelined, s.ssn.UID, err)
}
} else {
glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.",
task.Job, s.ssn.UID)
}
task.NodeName = hostname
if node, found := s.ssn.Nodes[hostname]; found {
if err := node.AddTask(task); err != nil {
glog.Errorf("Failed to pipeline task <%v/%v> to node <%v> in Session <%v>: %v",
task.Namespace, task.Name, hostname, s.ssn.UID, err)
}
glog.V(3).Infof("After pipelined Task <%v/%v> to Node <%v>: idle <%v>, used <%v>, releasing <%v>",
task.Namespace, task.Name, node.Name, node.Idle, node.Used, node.Releasing)
} else {
glog.Errorf("Failed to found Node <%s> in Session <%s> index when binding.",
hostname, s.ssn.UID)
}
for _, eh := range s.ssn.eventHandlers {
if eh.AllocateFunc != nil {
eh.AllocateFunc(&Event{
Task: task,
})
}
}
s.operations = append(s.operations, operation{
name: "pipeline",
args: []interface{}{task, hostname},
})
return nil
}
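// pipeline is intentionally a no-op on Commit: Pipeline already applied the
// session-level state changes, and a pipelined task has nothing to write back
// to the cluster yet.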
func (s *Statement) pipeline(task *api.TaskInfo) {
}
func (s *Statement) unpipeline(task *api.TaskInfo) error {
// Only update status in session
job, found := s.ssn.Jobs[task.Job]
if found {
if err := job.UpdateTaskStatus(task, api.Pending); err != nil {
glog.Errorf("Failed to update task <%v/%v> status to %v in Session <%v>: %v",
task.Namespace, task.Name, api.Pending, s.ssn.UID, err)
}
} else {
glog.Errorf("Failed to found Job <%s> in Session <%s> index when binding.",
task.Job, s.ssn.UID)
}
hostname := task.NodeName
if node, found := s.ssn.Nodes[hostname]; found {
if err := node.RemoveTask(task); err != nil {
glog.Errorf("Failed to pipeline task <%v/%v> to node <%v> in Session <%v>: %v",
task.Namespace, task.Name, hostname, s.ssn.UID, err)
}
glog.V(3).Infof("After pipelined Task <%v/%v> to Node <%v>: idle <%v>, used <%v>, releasing <%v>",
task.Namespace, task.Name, node.Name, node.Idle, node.Used, node.Releasing)
} else {
glog.Errorf("Failed to found Node <%s> in Session <%s> index when binding.",
hostname, s.ssn.UID)
}
for _, eh := range s.ssn.eventHandlers {
if eh.DeallocateFunc != nil {
eh.DeallocateFunc(&Event{
Task: task,
})
}
}
return nil
}
// Discard rolls back the staged evict and pipeline operations
func (s *Statement) Discard() {
glog.V(3).Info("Discarding operations ...")
for i := len(s.operations) - 1; i >= 0; i-- {
op := s.operations[i]
switch op.name {
case "evict":
s.unevict(op.args[0].(*api.TaskInfo), op.args[1].(string))
case "pipeline":
s.unpipeline(op.args[0].(*api.TaskInfo))
}
}
}
// Commit applies the staged evict and pipeline operations
func (s *Statement) Commit() {
glog.V(3).Info("Committing operations ...")
for _, op := range s.operations {
switch op.name {
case "evict":
s.evict(op.args[0].(*api.TaskInfo), op.args[1].(string))
case "pipeline":
s.pipeline(op.args[0].(*api.TaskInfo))
}
}
}
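// Illustrative usage (editorial): an action stages evictions in a Statement
// and applies them only when the whole group can proceed (victim selection
// elided):
//
//  stmt := ssn.Statement()
//  for _, victim := range victims {
//      if err := stmt.Evict(victim, "preempt"); err != nil {
//          stmt.Discard()
//          return err
//      }
//  }
//  stmt.Commit()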

View File

@ -0,0 +1,191 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" // auto-registry collectors in default registry
)
const (
// VolcanoNamespace - namespace in prometheus used by volcano
VolcanoNamespace = "volcano"
// OnSessionOpen label
OnSessionOpen = "OnSessionOpen"
// OnSessionClose label
OnSessionClose = "OnSessionClose"
)
var (
e2eSchedulingLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Subsystem: VolcanoNamespace,
Name: "e2e_scheduling_latency_milliseconds",
Help: "E2e scheduling latency in milliseconds (scheduling algorithm + binding)",
Buckets: prometheus.ExponentialBuckets(5, 2, 10),
},
)
pluginSchedulingLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: VolcanoNamespace,
Name: "plugin_scheduling_latency_microseconds",
Help: "Plugin scheduling latency in microseconds",
Buckets: prometheus.ExponentialBuckets(5, 2, 10),
}, []string{"plugin", "OnSession"},
)
actionSchedulingLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: VolcanoNamespace,
Name: "action_scheduling_latency_microseconds",
Help: "Action scheduling latency in microseconds",
Buckets: prometheus.ExponentialBuckets(5, 2, 10),
}, []string{"action"},
)
taskSchedulingLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Subsystem: VolcanoNamespace,
Name: "task_scheduling_latency_microseconds",
Help: "Task scheduling latency in microseconds",
Buckets: prometheus.ExponentialBuckets(5, 2, 10),
},
)
scheduleAttempts = promauto.NewCounterVec(
prometheus.CounterOpts{
Subsystem: VolcanoNamespace,
Name: "schedule_attempts_total",
Help: "Number of attempts to schedule pods, by the result. 'unschedulable' means a pod could not be scheduled, while 'error' means an internal scheduler problem.",
}, []string{"result"},
)
preemptionVictims = promauto.NewGauge(
prometheus.GaugeOpts{
Subsystem: VolcanoNamespace,
Name: "pod_preemption_victims",
Help: "Number of selected preemption victims",
},
)
preemptionAttempts = promauto.NewCounter(
prometheus.CounterOpts{
Subsystem: VolcanoNamespace,
Name: "total_preemption_attempts",
Help: "Total preemption attempts in the cluster till now",
},
)
unscheduleTaskCount = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: VolcanoNamespace,
Name: "unschedule_task_count",
Help: "Number of tasks could not be scheduled",
}, []string{"job_id"},
)
unscheduleJobCount = promauto.NewGauge(
prometheus.GaugeOpts{
Subsystem: VolcanoNamespace,
Name: "unschedule_job_count",
Help: "Number of jobs could not be scheduled",
},
)
jobRetryCount = promauto.NewCounterVec(
prometheus.CounterOpts{
Subsystem: VolcanoNamespace,
Name: "job_retry_counts",
Help: "Number of retry counts for one job",
}, []string{"job_id"},
)
)
// UpdatePluginDuration updates latency for every plugin
func UpdatePluginDuration(pluginName, OnSessionStatus string, duration time.Duration) {
pluginSchedulingLatency.WithLabelValues(pluginName, OnSessionStatus).Observe(DurationInMicroseconds(duration))
}
// UpdateActionDuration updates latency for every action
func UpdateActionDuration(actionName string, duration time.Duration) {
actionSchedulingLatency.WithLabelValues(actionName).Observe(DurationInMicroseconds(duration))
}
// UpdateE2eDuration updates the end-to-end scheduling latency
func UpdateE2eDuration(duration time.Duration) {
e2eSchedulingLatency.Observe(DurationInMilliseconds(duration))
}
// UpdateTaskScheduleDuration updates single task scheduling latency
func UpdateTaskScheduleDuration(duration time.Duration) {
taskSchedulingLatency.Observe(DurationInMicroseconds(duration))
}
// UpdatePodScheduleStatus updates the pod schedule decision; the label could be Success, Failure or Error
func UpdatePodScheduleStatus(label string, count int) {
scheduleAttempts.WithLabelValues(label).Add(float64(count))
}
// UpdatePreemptionVictimsCount updates count of preemption victims
func UpdatePreemptionVictimsCount(victimsCount int) {
preemptionVictims.Set(float64(victimsCount))
}
// RegisterPreemptionAttempts records the number of preemption attempts
func RegisterPreemptionAttempts() {
preemptionAttempts.Inc()
}
// UpdateUnscheduleTaskCount records the total number of unschedulable tasks
func UpdateUnscheduleTaskCount(jobID string, taskCount int) {
unscheduleTaskCount.WithLabelValues(jobID).Set(float64(taskCount))
}
// UpdateUnscheduleJobCount records the total number of unschedulable jobs
func UpdateUnscheduleJobCount(jobCount int) {
unscheduleJobCount.Set(float64(jobCount))
}
// RegisterJobRetries increments the retry count of a job.
func RegisterJobRetries(jobID string) {
jobRetryCount.WithLabelValues(jobID).Inc()
}
// DurationInMicroseconds gets the time in microseconds.
func DurationInMicroseconds(duration time.Duration) float64 {
return float64(duration.Nanoseconds()) / float64(time.Microsecond.Nanoseconds())
}
// DurationInMilliseconds gets the time in milliseconds.
func DurationInMilliseconds(duration time.Duration) float64 {
return float64(duration.Nanoseconds()) / float64(time.Millisecond.Nanoseconds())
}
// DurationInSeconds gets the time in seconds.
func DurationInSeconds(duration time.Duration) float64 {
return duration.Seconds()
}
// Duration gets the time elapsed since the specified start
func Duration(start time.Time) time.Duration {
return time.Since(start)
}
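
A minimal sketch of how these helpers are meant to be combined (the callback passed in is an assumption for illustration): take a timestamp, do the work, then feed the elapsed duration to the matching Update function.

// Sketch: timing an arbitrary piece of work and recording it
// under the OnSessionOpen label for the named plugin.
func timePlugin(pluginName string, work func()) {
	start := time.Now()
	work()
	UpdatePluginDuration(pluginName, OnSessionOpen, Duration(start))
}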

View File

@ -0,0 +1,65 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conformance
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/apis/scheduling"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
)
type conformancePlugin struct {
// Arguments given for the plugin
pluginArguments framework.Arguments
}
// New returns a conformance plugin
func New(arguments framework.Arguments) framework.Plugin {
return &conformancePlugin{pluginArguments: arguments}
}
func (pp *conformancePlugin) Name() string {
return "conformance"
}
func (pp *conformancePlugin) OnSessionOpen(ssn *framework.Session) {
evictableFn := func(evictor *api.TaskInfo, evictees []*api.TaskInfo) []*api.TaskInfo {
var victims []*api.TaskInfo
for _, evictee := range evictees {
className := evictee.Pod.Spec.PriorityClassName
// Skip critical pod.
if className == scheduling.SystemClusterCritical ||
className == scheduling.SystemNodeCritical ||
evictee.Namespace == v1.NamespaceSystem {
continue
}
victims = append(victims, evictee)
}
return victims
}
ssn.AddPreemptableFn(pp.Name(), evictableFn)
ssn.AddReclaimableFn(pp.Name(), evictableFn)
}
func (pp *conformancePlugin) OnSessionClose(ssn *framework.Session) {}

View File

@ -0,0 +1,52 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf"
// ApplyPluginConfDefaults sets each unset field of option to its default value
func ApplyPluginConfDefaults(option *conf.PluginOption) {
t := true
if option.EnabledJobOrder == nil {
option.EnabledJobOrder = &t
}
if option.EnabledJobReady == nil {
option.EnabledJobReady = &t
}
if option.EnabledJobPipelined == nil {
option.EnabledJobPipelined = &t
}
if option.EnabledTaskOrder == nil {
option.EnabledTaskOrder = &t
}
if option.EnabledPreemptable == nil {
option.EnabledPreemptable = &t
}
if option.EnabledReclaimable == nil {
option.EnabledReclaimable = &t
}
if option.EnabledQueueOrder == nil {
option.EnabledQueueOrder = &t
}
if option.EnabledPredicate == nil {
option.EnabledPredicate = &t
}
if option.EnabledNodeOrder == nil {
option.EnabledNodeOrder = &t
}
}
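
For illustration, a minimal sketch (assuming conf.PluginOption carries a Name field alongside the Enabled flags): a caller that sets only one flag keeps it, and every nil flag is defaulted to true.

// Sketch: only EnabledPreemptable is set explicitly; every nil flag defaults to true.
func exampleDefaults() *conf.PluginOption {
	disabled := false
	opt := &conf.PluginOption{Name: "drf", EnabledPreemptable: &disabled}
	ApplyPluginConfDefaults(opt)
	// *opt.EnabledPreemptable is still false; *opt.EnabledJobOrder and the
	// other previously-nil flags are now true.
	return opt
}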

View File

@ -0,0 +1,177 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drf
import (
"math"
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
)
var shareDelta = 0.000001
type drfAttr struct {
share float64
dominantResource string
allocated *api.Resource
}
type drfPlugin struct {
totalResource *api.Resource
// Key is Job ID
jobOpts map[api.JobID]*drfAttr
// Arguments given for the plugin
pluginArguments framework.Arguments
}
// New returns a drf plugin
func New(arguments framework.Arguments) framework.Plugin {
return &drfPlugin{
totalResource: api.EmptyResource(),
jobOpts: map[api.JobID]*drfAttr{},
pluginArguments: arguments,
}
}
func (drf *drfPlugin) Name() string {
return "drf"
}
func (drf *drfPlugin) OnSessionOpen(ssn *framework.Session) {
// Prepare scheduling data for this session.
for _, n := range ssn.Nodes {
drf.totalResource.Add(n.Allocatable)
}
for _, job := range ssn.Jobs {
attr := &drfAttr{
allocated: api.EmptyResource(),
}
for status, tasks := range job.TaskStatusIndex {
if api.AllocatedStatus(status) {
for _, t := range tasks {
attr.allocated.Add(t.Resreq)
}
}
}
// Calculate the initial share of the job
drf.updateShare(attr)
drf.jobOpts[job.UID] = attr
}
preemptableFn := func(preemptor *api.TaskInfo, preemptees []*api.TaskInfo) []*api.TaskInfo {
var victims []*api.TaskInfo
latt := drf.jobOpts[preemptor.Job]
lalloc := latt.allocated.Clone().Add(preemptor.Resreq)
ls := drf.calculateShare(lalloc, drf.totalResource)
allocations := map[api.JobID]*api.Resource{}
for _, preemptee := range preemptees {
if _, found := allocations[preemptee.Job]; !found {
ratt := drf.jobOpts[preemptee.Job]
allocations[preemptee.Job] = ratt.allocated.Clone()
}
ralloc := allocations[preemptee.Job].Sub(preemptee.Resreq)
rs := drf.calculateShare(ralloc, drf.totalResource)
if ls < rs || math.Abs(ls-rs) <= shareDelta {
victims = append(victims, preemptee)
}
}
glog.V(4).Infof("Victims from DRF plugins are %+v", victims)
return victims
}
ssn.AddPreemptableFn(drf.Name(), preemptableFn)
jobOrderFn := func(l interface{}, r interface{}) int {
lv := l.(*api.JobInfo)
rv := r.(*api.JobInfo)
glog.V(4).Infof("DRF JobOrderFn: <%v/%v> share state: %d, <%v/%v> share state: %d",
lv.Namespace, lv.Name, drf.jobOpts[lv.UID].share, rv.Namespace, rv.Name, drf.jobOpts[rv.UID].share)
if drf.jobOpts[lv.UID].share == drf.jobOpts[rv.UID].share {
return 0
}
if drf.jobOpts[lv.UID].share < drf.jobOpts[rv.UID].share {
return -1
}
return 1
}
ssn.AddJobOrderFn(drf.Name(), jobOrderFn)
// Register event handlers.
ssn.AddEventHandler(&framework.EventHandler{
AllocateFunc: func(event *framework.Event) {
attr := drf.jobOpts[event.Task.Job]
attr.allocated.Add(event.Task.Resreq)
drf.updateShare(attr)
glog.V(4).Infof("DRF AllocateFunc: task <%v/%v>, resreq <%v>, share <%v>",
event.Task.Namespace, event.Task.Name, event.Task.Resreq, attr.share)
},
DeallocateFunc: func(event *framework.Event) {
attr := drf.jobOpts[event.Task.Job]
attr.allocated.Sub(event.Task.Resreq)
drf.updateShare(attr)
glog.V(4).Infof("DRF EvictFunc: task <%v/%v>, resreq <%v>, share <%v>",
event.Task.Namespace, event.Task.Name, event.Task.Resreq, attr.share)
},
})
}
func (drf *drfPlugin) updateShare(attr *drfAttr) {
attr.share = drf.calculateShare(attr.allocated, drf.totalResource)
}
func (drf *drfPlugin) calculateShare(allocated, totalResource *api.Resource) float64 {
res := float64(0)
for _, rn := range totalResource.ResourceNames() {
share := helpers.Share(allocated.Get(rn), totalResource.Get(rn))
if share > res {
res = share
}
}
return res
}
func (drf *drfPlugin) OnSessionClose(session *framework.Session) {
// Clean schedule data.
drf.totalResource = api.EmptyResource()
drf.jobOpts = map[api.JobID]*drfAttr{}
}
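
A worked example of the dominant-share rule implemented by calculateShare above, with assumed numbers (a 10 CPU / 20Gi cluster and a job holding 2 CPU / 8Gi): the per-resource shares are 0.2 and 0.4, so memory is the dominant resource and the job's share is 0.4. A self-contained sketch of the same arithmetic:

package main

import "fmt"

func main() {
	// Assumed capacities and allocation; mirrors calculateShare's max-share rule.
	total := map[string]float64{"cpu": 10, "memory": 20}
	alloc := map[string]float64{"cpu": 2, "memory": 8}
	share := 0.0
	for rn, t := range total {
		if s := alloc[rn] / t; s > share {
			share = s
		}
	}
	fmt.Println(share) // 0.4: memory is the dominant resource
}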

View File

@ -0,0 +1,42 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/conformance"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/drf"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/gang"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/nodeorder"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/predicates"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/priority"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/proportion"
)
func init() {
// Plugins for Jobs
framework.RegisterPluginBuilder("drf", drf.New)
framework.RegisterPluginBuilder("gang", gang.New)
framework.RegisterPluginBuilder("predicates", predicates.New)
framework.RegisterPluginBuilder("priority", priority.New)
framework.RegisterPluginBuilder("nodeorder", nodeorder.New)
framework.RegisterPluginBuilder("conformance", conformance.New)
// Plugins for Queues
framework.RegisterPluginBuilder("proportion", proportion.New)
}
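
The same factory accepts out-of-tree builders; a hedged sketch (the example.com/myorg/myplugin import is hypothetical and stands for any package exposing New(framework.Arguments) framework.Plugin):

// Sketch: registering a hypothetical custom plugin next to the built-ins,
// so a scheduler conf can reference it by name.
package myregistration

import (
	"example.com/myorg/myplugin" // hypothetical plugin package

	"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
)

func init() {
	framework.RegisterPluginBuilder("myplugin", myplugin.New)
}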

View File

@ -0,0 +1,162 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gang
import (
"fmt"
"github.com/golang/glog"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics"
)
type gangPlugin struct {
// Arguments given for the plugin
pluginArguments framework.Arguments
}
// New returns a gang plugin
func New(arguments framework.Arguments) framework.Plugin {
return &gangPlugin{pluginArguments: arguments}
}
func (gp *gangPlugin) Name() string {
return "gang"
}
func (gp *gangPlugin) OnSessionOpen(ssn *framework.Session) {
validJobFn := func(obj interface{}) *api.ValidateResult {
job, ok := obj.(*api.JobInfo)
if !ok {
return &api.ValidateResult{
Pass: false,
Message: fmt.Sprintf("Failed to convert <%v> to *JobInfo", obj),
}
}
vtn := job.ValidTaskNum()
if vtn < job.MinAvailable {
return &api.ValidateResult{
Pass: false,
Reason: v1alpha1.NotEnoughPodsReason,
Message: fmt.Sprintf("Not enough valid tasks for gang-scheduling, valid: %d, min: %d",
vtn, job.MinAvailable),
}
}
return nil
}
ssn.AddJobValidFn(gp.Name(), validJobFn)
preemptableFn := func(preemptor *api.TaskInfo, preemptees []*api.TaskInfo) []*api.TaskInfo {
var victims []*api.TaskInfo
for _, preemptee := range preemptees {
job := ssn.Jobs[preemptee.Job]
occupied := job.ReadyTaskNum()
preemptable := job.MinAvailable <= occupied-1 || job.MinAvailable == 1
if !preemptable {
glog.V(3).Infof("Can not preempt task <%v/%v> because of gang-scheduling",
preemptee.Namespace, preemptee.Name)
} else {
victims = append(victims, preemptee)
}
}
glog.V(3).Infof("Victims from Gang plugins are %+v", victims)
return victims
}
// TODO(k82cn): Support preempt/reclaim batch job.
ssn.AddReclaimableFn(gp.Name(), preemptableFn)
ssn.AddPreemptableFn(gp.Name(), preemptableFn)
jobOrderFn := func(l, r interface{}) int {
lv := l.(*api.JobInfo)
rv := r.(*api.JobInfo)
lReady := lv.Ready()
rReady := rv.Ready()
glog.V(4).Infof("Gang JobOrderFn: <%v/%v> is ready: %t, <%v/%v> is ready: %t",
lv.Namespace, lv.Name, lReady, rv.Namespace, rv.Name, rReady)
if lReady && rReady {
return 0
}
if lReady {
return 1
}
if rReady {
return -1
}
return 0
}
ssn.AddJobOrderFn(gp.Name(), jobOrderFn)
ssn.AddJobReadyFn(gp.Name(), func(obj interface{}) bool {
ji := obj.(*api.JobInfo)
return ji.Ready()
})
ssn.AddJobPipelinedFn(gp.Name(), func(obj interface{}) bool {
ji := obj.(*api.JobInfo)
return ji.Pipelined()
})
}
func (gp *gangPlugin) OnSessionClose(ssn *framework.Session) {
var unreadyTaskCount int32
var unScheduleJobCount int
for _, job := range ssn.Jobs {
if !job.Ready() {
unreadyTaskCount = job.MinAvailable - job.ReadyTaskNum()
msg := fmt.Sprintf("%v/%v tasks in gang unschedulable: %v",
job.MinAvailable-job.ReadyTaskNum(), len(job.Tasks), job.FitError())
unScheduleJobCount++
metrics.UpdateUnscheduleTaskCount(job.Name, int(unreadyTaskCount))
metrics.RegisterJobRetries(job.Name)
jc := &v1alpha1.PodGroupCondition{
Type: v1alpha1.PodGroupUnschedulableType,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Now(),
TransitionID: string(ssn.UID),
Reason: v1alpha1.NotEnoughResourcesReason,
Message: msg,
}
if err := ssn.UpdateJobCondition(job, jc); err != nil {
glog.Errorf("Failed to update job <%s/%s> condition: %v",
job.Namespace, job.Name, err)
}
}
}
metrics.UpdateUnscheduleJobCount(unScheduleJobCount)
}

View File

@ -0,0 +1,226 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeorder
import (
"fmt"
"github.com/golang/glog"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/cache"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/util"
)
const (
// NodeAffinityWeight is the key for providing Node Affinity Priority Weight in YAML
NodeAffinityWeight = "nodeaffinity.weight"
// PodAffinityWeight is the key for providing Pod Affinity Priority Weight in YAML
PodAffinityWeight = "podaffinity.weight"
// LeastRequestedWeight is the key for providing Least Requested Priority Weight in YAML
LeastRequestedWeight = "leastrequested.weight"
// BalancedResourceWeight is the key for providing Balanced Resource Priority Weight in YAML
BalancedResourceWeight = "balancedresource.weight"
)
type nodeOrderPlugin struct {
// Arguments given for the plugin
pluginArguments framework.Arguments
}
func getInterPodAffinityScore(name string, interPodAffinityScore schedulerapi.HostPriorityList) int {
for _, hostPriority := range interPodAffinityScore {
if hostPriority.Host == name {
return hostPriority.Score
}
}
return 0
}
func generateNodeMapAndSlice(nodes map[string]*api.NodeInfo) (map[string]*cache.NodeInfo, []*v1.Node) {
nodeMap := make(map[string]*cache.NodeInfo)
var nodeSlice []*v1.Node
for _, node := range nodes {
nodeInfo := cache.NewNodeInfo(node.Pods()...)
nodeInfo.SetNode(node.Node)
nodeMap[node.Name] = nodeInfo
nodeSlice = append(nodeSlice, node.Node)
}
return nodeMap, nodeSlice
}
type cachedNodeInfo struct {
session *framework.Session
}
func (c *cachedNodeInfo) GetNodeInfo(name string) (*v1.Node, error) {
node, found := c.session.Nodes[name]
if !found {
for _, cacheNode := range c.session.Nodes {
pods := cacheNode.Pods()
for _, pod := range pods {
if pod.Spec.NodeName == "" {
return cacheNode.Node, nil
}
}
}
return nil, fmt.Errorf("failed to find node <%s>", name)
}
return node.Node, nil
}
// New returns a nodeOrderPlugin object
func New(arguments framework.Arguments) framework.Plugin {
return &nodeOrderPlugin{pluginArguments: arguments}
}
func (pp *nodeOrderPlugin) Name() string {
return "nodeorder"
}
type priorityWeight struct {
leastReqWeight int
nodeAffinityWeight int
podAffinityWeight int
balancedResourceWeight int
}
func calculateWeight(args framework.Arguments) priorityWeight {
/*
User should give the priority weights in this format (nodeaffinity.weight, podaffinity.weight, leastrequested.weight, balancedresource.weight).
Currently supported only for the nodeaffinity, podaffinity, leastrequested and balancedresource priorities.
actions: "reclaim, allocate, backfill, preempt"
tiers:
- plugins:
- name: priority
- name: gang
- name: conformance
- plugins:
- name: drf
- name: predicates
- name: proportion
- name: nodeorder
arguments:
nodeaffinity.weight: 2
podaffinity.weight: 2
leastrequested.weight: 2
balancedresource.weight: 2
*/
// Values are initialized to 1.
weight := priorityWeight{
leastReqWeight: 1,
nodeAffinityWeight: 1,
podAffinityWeight: 1,
balancedResourceWeight: 1,
}
// Checks whether nodeaffinity.weight is provided or not, if given, modifies the value in weight struct.
args.GetInt(&weight.nodeAffinityWeight, NodeAffinityWeight)
// Checks whether podaffinity.weight is provided or not, if given, modifies the value in weight struct.
args.GetInt(&weight.podAffinityWeight, PodAffinityWeight)
// Checks whether leastrequested.weight is provided or not, if given, modifies the value in weight struct.
args.GetInt(&weight.leastReqWeight, LeastRequestedWeight)
// Checks whether balancedresource.weight is provided or not, if given, modifies the value in weight struct.
args.GetInt(&weight.balancedRescourceWeight, BalancedResourceWeight)
return weight
}
func (pp *nodeOrderPlugin) OnSessionOpen(ssn *framework.Session) {
nodeOrderFn := func(task *api.TaskInfo, node *api.NodeInfo) (float64, error) {
weight := calculateWeight(pp.pluginArguments)
pl := &util.PodLister{
Session: ssn,
}
nl := &util.NodeLister{
Session: ssn,
}
cn := &cachedNodeInfo{
session: ssn,
}
var nodeMap map[string]*cache.NodeInfo
var nodeSlice []*v1.Node
var interPodAffinityScore schedulerapi.HostPriorityList
nodeMap, nodeSlice = generateNodeMapAndSlice(ssn.Nodes)
nodeInfo := cache.NewNodeInfo(node.Pods()...)
nodeInfo.SetNode(node.Node)
var score = 0.0
//TODO: Add ImageLocalityPriority Function once priorityMetadata is published
//Issue: #74132 in kubernetes ( https://github.com/kubernetes/kubernetes/issues/74132 )
host, err := priorities.LeastRequestedPriorityMap(task.Pod, nil, nodeInfo)
if err != nil {
glog.Warningf("Least Requested Priority Failed because of Error: %v", err)
return 0, err
}
// host.Score is multiplied by leastReqWeight (default 1) and added to the total score.
score = score + float64(host.Score*weight.leastReqWeight)
host, err = priorities.BalancedResourceAllocationMap(task.Pod, nil, nodeInfo)
if err != nil {
glog.Warningf("Balanced Resource Allocation Priority Failed because of Error: %v", err)
return 0, err
}
// host.Score is multiplied by balancedResourceWeight (default 1) and added to the total score.
score = score + float64(host.Score*weight.balancedResourceWeight)
host, err = priorities.CalculateNodeAffinityPriorityMap(task.Pod, nil, nodeInfo)
if err != nil {
glog.Warningf("Calculate Node Affinity Priority Failed because of Error: %v", err)
return 0, err
}
// host.Score is multiplied by nodeAffinityWeight (default 1) and added to the total score.
score = score + float64(host.Score*weight.nodeAffinityWeight)
mapFn := priorities.NewInterPodAffinityPriority(cn, nl, pl, v1.DefaultHardPodAffinitySymmetricWeight)
interPodAffinityScore, err = mapFn(task.Pod, nodeMap, nodeSlice)
if err != nil {
glog.Warningf("Calculate Inter Pod Affinity Priority Failed because of Error: %v", err)
return 0, err
}
hostScore := getInterPodAffinityScore(node.Name, interPodAffinityScore)
// hostScore is multiplied by podAffinityWeight (default 1) and added to the total score.
score = score + float64(hostScore*weight.podAffinityWeight)
glog.V(4).Infof("Total Score for that node is: %d", score)
return score, nil
}
ssn.AddNodeOrderFn(pp.Name(), nodeOrderFn)
}
func (pp *nodeOrderPlugin) OnSessionClose(ssn *framework.Session) {
}
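
For clarity, a minimal sketch of how the weights above arrive from the scheduler configuration (assuming framework.Arguments is the string-keyed map consumed by GetInt): keys that are present override the defaults, everything else stays at 1.

// Sketch: arguments as parsed from the YAML shown in the calculateWeight comment.
func exampleWeights() priorityWeight {
	args := framework.Arguments{
		"nodeaffinity.weight":     "2",
		"podaffinity.weight":      "2",
		"leastrequested.weight":   "2",
		"balancedresource.weight": "2",
	}
	return calculateWeight(args) // every weight is 2; omitted keys would stay at 1
}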

View File

@ -0,0 +1,205 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package predicates
import (
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/cache"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins/util"
)
type predicatesPlugin struct {
// Arguments given for the plugin
pluginArguments framework.Arguments
}
// New returns a predicates plugin
func New(arguments framework.Arguments) framework.Plugin {
return &predicatesPlugin{pluginArguments: arguments}
}
func (pp *predicatesPlugin) Name() string {
return "predicates"
}
func formatReason(reasons []algorithm.PredicateFailureReason) string {
reasonStrings := []string{}
for _, v := range reasons {
reasonStrings = append(reasonStrings, fmt.Sprintf("%v", v.GetReason()))
}
return strings.Join(reasonStrings, ", ")
}
func (pp *predicatesPlugin) OnSessionOpen(ssn *framework.Session) {
pl := &util.PodLister{
Session: ssn,
}
ni := &util.CachedNodeInfo{
Session: ssn,
}
ssn.AddPredicateFn(pp.Name(), func(task *api.TaskInfo, node *api.NodeInfo) error {
nodeInfo := cache.NewNodeInfo(node.Pods()...)
nodeInfo.SetNode(node.Node)
if node.Allocatable.MaxTaskNum <= len(nodeInfo.Pods()) {
return fmt.Errorf("node <%s> can not allow more task running on it", node.Name)
}
// CheckNodeCondition Predicate
fit, reasons, err := predicates.CheckNodeConditionPredicate(task.Pod, nil, nodeInfo)
if err != nil {
return err
}
glog.V(4).Infof("CheckNodeCondition predicates Task <%s/%s> on Node <%s>: fit %t, err %v",
task.Namespace, task.Name, node.Name, fit, err)
if !fit {
return fmt.Errorf("node <%s> are not available to schedule task <%s/%s>: %s",
node.Name, task.Namespace, task.Name, formatReason(reasons))
}
// CheckNodeUnschedulable Predicate
fit, _, err = predicates.CheckNodeUnschedulablePredicate(task.Pod, nil, nodeInfo)
if err != nil {
return err
}
glog.V(4).Infof("CheckNodeUnschedulable Predicate Task <%s/%s> on Node <%s>: fit %t, err %v",
task.Namespace, task.Name, node.Name, fit, err)
if !fit {
return fmt.Errorf("task <%s/%s> node <%s> set to unschedulable",
task.Namespace, task.Name, node.Name)
}
// NodeSelector Predicate
fit, _, err = predicates.PodMatchNodeSelector(task.Pod, nil, nodeInfo)
if err != nil {
return err
}
glog.V(4).Infof("NodeSelect predicates Task <%s/%s> on Node <%s>: fit %t, err %v",
task.Namespace, task.Name, node.Name, fit, err)
if !fit {
return fmt.Errorf("node <%s> didn't match task <%s/%s> node selector",
node.Name, task.Namespace, task.Name)
}
// HostPorts Predicate
fit, _, err = predicates.PodFitsHostPorts(task.Pod, nil, nodeInfo)
if err != nil {
return err
}
glog.V(4).Infof("HostPorts predicates Task <%s/%s> on Node <%s>: fit %t, err %v",
task.Namespace, task.Name, node.Name, fit, err)
if !fit {
return fmt.Errorf("node <%s> didn't have available host ports for task <%s/%s>",
node.Name, task.Namespace, task.Name)
}
// Toleration/Taint Predicate
fit, _, err = predicates.PodToleratesNodeTaints(task.Pod, nil, nodeInfo)
if err != nil {
return err
}
glog.V(4).Infof("Toleration/Taint predicates Task <%s/%s> on Node <%s>: fit %t, err %v",
task.Namespace, task.Name, node.Name, fit, err)
if !fit {
return fmt.Errorf("task <%s/%s> does not tolerate node <%s> taints",
task.Namespace, task.Name, node.Name)
}
// CheckNodeMemoryPressurePredicate
fit, _, err = predicates.CheckNodeMemoryPressurePredicate(task.Pod, nil, nodeInfo)
if err != nil {
return err
}
glog.V(4).Infof("CheckNodeMemoryPressure predicates Task <%s/%s> on Node <%s>: fit %t, err %v",
task.Namespace, task.Name, node.Name, fit, err)
if !fit {
return fmt.Errorf("node <%s> are not available to schedule task <%s/%s> due to Memory Pressure",
node.Name, task.Namespace, task.Name)
}
// CheckNodeDiskPressurePredicate
fit, _, err = predicates.CheckNodeDiskPressurePredicate(task.Pod, nil, nodeInfo)
if err != nil {
return err
}
glog.V(4).Infof("CheckNodeDiskPressure predicates Task <%s/%s> on Node <%s>: fit %t, err %v",
task.Namespace, task.Name, node.Name, fit, err)
if !fit {
return fmt.Errorf("node <%s> are not available to schedule task <%s/%s> due to Disk Pressure",
node.Name, task.Namespace, task.Name)
}
// CheckNodePIDPressurePredicate
fit, _, err = predicates.CheckNodePIDPressurePredicate(task.Pod, nil, nodeInfo)
if err != nil {
return err
}
glog.V(4).Infof("CheckNodePIDPressurePredicate predicates Task <%s/%s> on Node <%s>: fit %t, err %v",
task.Namespace, task.Name, node.Name, fit, err)
if !fit {
return fmt.Errorf("node <%s> are not available to schedule task <%s/%s> due to PID Pressure",
node.Name, task.Namespace, task.Name)
}
// Pod Affinity/Anti-Affinity Predicate
podAffinityPredicate := predicates.NewPodAffinityPredicate(ni, pl)
fit, _, err = podAffinityPredicate(task.Pod, nil, nodeInfo)
if err != nil {
return err
}
glog.V(4).Infof("Pod Affinity/Anti-Affinity predicates Task <%s/%s> on Node <%s>: fit %t, err %v",
task.Namespace, task.Name, node.Name, fit, err)
if !fit {
return fmt.Errorf("task <%s/%s> affinity/anti-affinity failed on node <%s>",
node.Name, task.Namespace, task.Name)
}
return nil
})
}
func (pp *predicatesPlugin) OnSessionClose(ssn *framework.Session) {}

View File

@ -0,0 +1,82 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priority
import (
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
)
type priorityPlugin struct {
// Arguments given for the plugin
pluginArguments framework.Arguments
}
// New returns a priority plugin
func New(arguments framework.Arguments) framework.Plugin {
return &priorityPlugin{pluginArguments: arguments}
}
func (pp *priorityPlugin) Name() string {
return "priority"
}
func (pp *priorityPlugin) OnSessionOpen(ssn *framework.Session) {
taskOrderFn := func(l interface{}, r interface{}) int {
lv := l.(*api.TaskInfo)
rv := r.(*api.TaskInfo)
glog.V(4).Infof("Priority TaskOrder: <%v/%v> priority is %v, <%v/%v> priority is %v",
lv.Namespace, lv.Name, lv.Priority, rv.Namespace, rv.Name, rv.Priority)
if lv.Priority == rv.Priority {
return 0
}
if lv.Priority > rv.Priority {
return -1
}
return 1
}
// Add Task Order function
ssn.AddTaskOrderFn(pp.Name(), taskOrderFn)
jobOrderFn := func(l, r interface{}) int {
lv := l.(*api.JobInfo)
rv := r.(*api.JobInfo)
glog.V(4).Infof("Priority JobOrderFn: <%v/%v> priority: %d, <%v/%v> priority: %d",
lv.Namespace, lv.Name, lv.Priority, rv.Namespace, rv.Name, rv.Priority)
if lv.Priority > rv.Priority {
return -1
}
if lv.Priority < rv.Priority {
return 1
}
return 0
}
ssn.AddJobOrderFn(pp.Name(), jobOrderFn)
}
func (pp *priorityPlugin) OnSessionClose(ssn *framework.Session) {}

View File

@ -0,0 +1,243 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proportion
import (
"github.com/golang/glog"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api/helpers"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
)
type proportionPlugin struct {
totalResource *api.Resource
queueOpts map[api.QueueID]*queueAttr
// Arguments given for the plugin
pluginArguments framework.Arguments
}
type queueAttr struct {
queueID api.QueueID
name string
weight int32
share float64
deserved *api.Resource
allocated *api.Resource
request *api.Resource
}
// New returns a proportion plugin
func New(arguments framework.Arguments) framework.Plugin {
return &proportionPlugin{
totalResource: api.EmptyResource(),
queueOpts: map[api.QueueID]*queueAttr{},
pluginArguments: arguments,
}
}
func (pp *proportionPlugin) Name() string {
return "proportion"
}
func (pp *proportionPlugin) OnSessionOpen(ssn *framework.Session) {
// Prepare scheduling data for this session.
for _, n := range ssn.Nodes {
pp.totalResource.Add(n.Allocatable)
}
glog.V(4).Infof("The total resource is <%v>", pp.totalResource)
// Build attributes for Queues.
for _, job := range ssn.Jobs {
glog.V(4).Infof("Considering Job <%s/%s>.", job.Namespace, job.Name)
if _, found := pp.queueOpts[job.Queue]; !found {
queue := ssn.Queues[job.Queue]
attr := &queueAttr{
queueID: queue.UID,
name: queue.Name,
weight: queue.Weight,
deserved: api.EmptyResource(),
allocated: api.EmptyResource(),
request: api.EmptyResource(),
}
pp.queueOpts[job.Queue] = attr
glog.V(4).Infof("Added Queue <%s> attributes.", job.Queue)
}
for status, tasks := range job.TaskStatusIndex {
if api.AllocatedStatus(status) {
for _, t := range tasks {
attr := pp.queueOpts[job.Queue]
attr.allocated.Add(t.Resreq)
attr.request.Add(t.Resreq)
}
} else if status == api.Pending {
for _, t := range tasks {
attr := pp.queueOpts[job.Queue]
attr.request.Add(t.Resreq)
}
}
}
}
remaining := pp.totalResource.Clone()
meet := map[api.QueueID]struct{}{}
for {
totalWeight := int32(0)
for _, attr := range pp.queueOpts {
if _, found := meet[attr.queueID]; found {
continue
}
totalWeight += attr.weight
}
// If no queues, break
if totalWeight == 0 {
break
}
// Calculates the deserved of each Queue.
deserved := api.EmptyResource()
for _, attr := range pp.queueOpts {
glog.V(4).Infof("Considering Queue <%s>: weight <%d>, total weight <%d>.",
attr.name, attr.weight, totalWeight)
if _, found := meet[attr.queueID]; found {
continue
}
oldDeserved := attr.deserved.Clone()
attr.deserved.Add(remaining.Clone().Multi(float64(attr.weight) / float64(totalWeight)))
if !attr.deserved.LessEqual(attr.request) {
attr.deserved = helpers.Min(attr.deserved, attr.request)
meet[attr.queueID] = struct{}{}
}
pp.updateShare(attr)
glog.V(4).Infof("The attributes of queue <%s> in proportion: deserved <%v>, allocate <%v>, request <%v>, share <%0.2f>",
attr.name, attr.deserved, attr.allocated, attr.request, attr.share)
deserved.Add(attr.deserved.Clone().Sub(oldDeserved))
}
remaining.Sub(deserved)
if remaining.IsEmpty() {
break
}
}
ssn.AddQueueOrderFn(pp.Name(), func(l, r interface{}) int {
lv := l.(*api.QueueInfo)
rv := r.(*api.QueueInfo)
if pp.queueOpts[lv.UID].share == pp.queueOpts[rv.UID].share {
return 0
}
if pp.queueOpts[lv.UID].share < pp.queueOpts[rv.UID].share {
return -1
}
return 1
})
ssn.AddReclaimableFn(pp.Name(), func(reclaimer *api.TaskInfo, reclaimees []*api.TaskInfo) []*api.TaskInfo {
var victims []*api.TaskInfo
allocations := map[api.QueueID]*api.Resource{}
for _, reclaimee := range reclaimees {
job := ssn.Jobs[reclaimee.Job]
attr := pp.queueOpts[job.Queue]
if _, found := allocations[job.Queue]; !found {
allocations[job.Queue] = attr.allocated.Clone()
}
allocated := allocations[job.Queue]
if allocated.Less(reclaimee.Resreq) {
glog.Errorf("Failed to allocate resource for Task <%s/%s> in Queue <%s> not enough resource.",
reclaimee.Namespace, reclaimee.Name, job.Queue)
continue
}
allocated.Sub(reclaimee.Resreq)
if attr.deserved.LessEqual(allocated) {
victims = append(victims, reclaimee)
}
}
return victims
})
ssn.AddOverusedFn(pp.Name(), func(obj interface{}) bool {
queue := obj.(*api.QueueInfo)
attr := pp.queueOpts[queue.UID]
overused := attr.deserved.LessEqual(attr.allocated)
if overused {
glog.V(3).Infof("Queue <%v>: deserved <%v>, allocated <%v>, share <%v>",
queue.Name, attr.deserved, attr.allocated, attr.share)
}
return overused
})
// Register event handlers.
ssn.AddEventHandler(&framework.EventHandler{
AllocateFunc: func(event *framework.Event) {
job := ssn.Jobs[event.Task.Job]
attr := pp.queueOpts[job.Queue]
attr.allocated.Add(event.Task.Resreq)
pp.updateShare(attr)
glog.V(4).Infof("Proportion AllocateFunc: task <%v/%v>, resreq <%v>, share <%v>",
event.Task.Namespace, event.Task.Name, event.Task.Resreq, attr.share)
},
DeallocateFunc: func(event *framework.Event) {
job := ssn.Jobs[event.Task.Job]
attr := pp.queueOpts[job.Queue]
attr.allocated.Sub(event.Task.Resreq)
pp.updateShare(attr)
glog.V(4).Infof("Proportion EvictFunc: task <%v/%v>, resreq <%v>, share <%v>",
event.Task.Namespace, event.Task.Name, event.Task.Resreq, attr.share)
},
})
}
func (pp *proportionPlugin) OnSessionClose(ssn *framework.Session) {
pp.totalResource = nil
pp.queueOpts = nil
}
func (pp *proportionPlugin) updateShare(attr *queueAttr) {
res := float64(0)
// TODO(k82cn): how to handle fragment issues?
for _, rn := range attr.deserved.ResourceNames() {
share := helpers.Share(attr.allocated.Get(rn), attr.deserved.Get(rn))
if share > res {
res = share
}
}
attr.share = res
}
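
A worked example of the deserved loop in OnSessionOpen, with assumed numbers (12 CPUs; queue A has weight 2 and requests 5, queue B has weight 1 and requests 10): the first pass hands out 8 and 4, A is capped at its request of 5 and marked met, and the 3 surplus CPUs flow to B on the second pass. A self-contained sketch of the same arithmetic:

package main

import "fmt"

func main() {
	// Assumed numbers: 12 CPUs; A(weight 2, requests 5), B(weight 1, requests 10).
	remaining := 12.0
	weights := map[string]float64{"A": 2, "B": 1}
	request := map[string]float64{"A": 5, "B": 10}
	deserved := map[string]float64{}
	meet := map[string]bool{}
	for len(meet) < len(weights) && remaining > 0 {
		totalWeight := 0.0
		for q, w := range weights {
			if !meet[q] {
				totalWeight += w
			}
		}
		handedOut := 0.0
		for q, w := range weights {
			if meet[q] {
				continue
			}
			old := deserved[q]
			deserved[q] += remaining * w / totalWeight
			if deserved[q] >= request[q] { // capped by the queue's actual request
				deserved[q] = request[q]
				meet[q] = true
			}
			handedOut += deserved[q] - old
		}
		remaining -= handedOut
	}
	fmt.Println(deserved) // map[A:5 B:7]: A capped at its request, B absorbs the surplus
}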

View File

@ -0,0 +1,114 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
)
// PodLister is used in predicate and nodeorder plugin
type PodLister struct {
Session *framework.Session
}
// List method is used to list all the pods
func (pl *PodLister) List(selector labels.Selector) ([]*v1.Pod, error) {
var pods []*v1.Pod
for _, job := range pl.Session.Jobs {
for status, tasks := range job.TaskStatusIndex {
if !api.AllocatedStatus(status) {
continue
}
for _, task := range tasks {
if selector.Matches(labels.Set(task.Pod.Labels)) {
if task.NodeName != task.Pod.Spec.NodeName {
pod := task.Pod.DeepCopy()
pod.Spec.NodeName = task.NodeName
pods = append(pods, pod)
} else {
pods = append(pods, task.Pod)
}
}
}
}
}
return pods, nil
}
// FilteredList is used to list all the pods under filter condition
func (pl *PodLister) FilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
var pods []*v1.Pod
for _, job := range pl.Session.Jobs {
for status, tasks := range job.TaskStatusIndex {
if !api.AllocatedStatus(status) {
continue
}
for _, task := range tasks {
if podFilter(task.Pod) && selector.Matches(labels.Set(task.Pod.Labels)) {
if task.NodeName != task.Pod.Spec.NodeName {
pod := task.Pod.DeepCopy()
pod.Spec.NodeName = task.NodeName
pods = append(pods, pod)
} else {
pods = append(pods, task.Pod)
}
}
}
}
}
return pods, nil
}
// CachedNodeInfo is used in the nodeorder and predicates plugins
type CachedNodeInfo struct {
Session *framework.Session
}
// GetNodeInfo is used to get info of a particular node
func (c *CachedNodeInfo) GetNodeInfo(name string) (*v1.Node, error) {
node, found := c.Session.Nodes[name]
if !found {
return nil, fmt.Errorf("failed to find node <%s>", name)
}
return node.Node, nil
}
// NodeLister is used in nodeorder plugin
type NodeLister struct {
Session *framework.Session
}
// List is used to list all the nodes
func (nl *NodeLister) List() ([]*v1.Node, error) {
var nodes []*v1.Node
for _, node := range nl.Session.Nodes {
nodes = append(nodes, node.Node)
}
return nodes, nil
}

View File

@ -0,0 +1,102 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
schedcache "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/cache"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/metrics"
)
// Scheduler watches for new unscheduled pods for kube-batch. It attempts to find
// nodes that they fit on and writes bindings back to the api server.
type Scheduler struct {
cache schedcache.Cache
config *rest.Config
actions []framework.Action
plugins []conf.Tier
schedulerConf string
schedulePeriod time.Duration
}
// NewScheduler returns a scheduler
func NewScheduler(
config *rest.Config,
schedulerName string,
conf string,
period time.Duration,
defaultQueue string,
) (*Scheduler, error) {
scheduler := &Scheduler{
config: config,
schedulerConf: conf,
cache: schedcache.New(config, schedulerName, defaultQueue),
schedulePeriod: period,
}
return scheduler, nil
}
// Run runs the Scheduler
func (pc *Scheduler) Run(stopCh <-chan struct{}) {
var err error
// Start cache for policy.
go pc.cache.Run(stopCh)
pc.cache.WaitForCacheSync(stopCh)
// Load configuration of scheduler
schedConf := defaultSchedulerConf
if len(pc.schedulerConf) != 0 {
if schedConf, err = readSchedulerConf(pc.schedulerConf); err != nil {
glog.Errorf("Failed to read scheduler configuration '%s', using default configuration: %v",
pc.schedulerConf, err)
schedConf = defaultSchedulerConf
}
}
pc.actions, pc.plugins, err = loadSchedulerConf(schedConf)
if err != nil {
panic(err)
}
go wait.Until(pc.runOnce, pc.schedulePeriod, stopCh)
}
func (pc *Scheduler) runOnce() {
glog.V(4).Infof("Start scheduling ...")
scheduleStartTime := time.Now()
defer glog.V(4).Infof("End scheduling ...")
defer metrics.UpdateE2eDuration(metrics.Duration(scheduleStartTime))
ssn := framework.OpenSession(pc.cache, pc.plugins)
defer framework.CloseSession(ssn)
for _, action := range pc.actions {
actionStartTime := time.Now()
action.Execute(ssn)
metrics.UpdateActionDuration(action.Name(), metrics.Duration(actionStartTime))
}
}
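
A hedged sketch of wiring this up from a caller (the in-cluster config, scheduler name and period are illustrative values, not mandated by this file):

// Sketch: constructing and running the scheduler until stopped.
func runScheduler() {
	config, err := rest.InClusterConfig()
	if err != nil {
		glog.Fatalf("Failed to build kube config: %v", err)
	}
	sched, err := NewScheduler(config, "kube-batch", "", time.Second, "default")
	if err != nil {
		glog.Fatalf("Failed to create scheduler: %v", err)
	}
	stopCh := make(chan struct{})
	sched.Run(stopCh) // kicks off cache sync and the periodic runOnce loop
	<-stopCh          // Run returns immediately; block until shutdown
}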

View File

@ -0,0 +1,81 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"fmt"
"io/ioutil"
"strings"
"gopkg.in/yaml.v2"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/conf"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/framework"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/plugins"
)
var defaultSchedulerConf = `
actions: "allocate, backfill"
tiers:
- plugins:
- name: priority
- name: gang
- plugins:
- name: drf
- name: predicates
- name: proportion
- name: nodeorder
`
func loadSchedulerConf(confStr string) ([]framework.Action, []conf.Tier, error) {
var actions []framework.Action
schedulerConf := &conf.SchedulerConfiguration{}
buf := make([]byte, len(confStr))
copy(buf, confStr)
if err := yaml.Unmarshal(buf, schedulerConf); err != nil {
return nil, nil, err
}
// Set default settings for each plugin if not set
for i, tier := range schedulerConf.Tiers {
for j := range tier.Plugins {
plugins.ApplyPluginConfDefaults(&schedulerConf.Tiers[i].Plugins[j])
}
}
actionNames := strings.Split(schedulerConf.Actions, ",")
for _, actionName := range actionNames {
if action, found := framework.GetAction(strings.TrimSpace(actionName)); found {
actions = append(actions, action)
} else {
return nil, nil, fmt.Errorf("failed to found Action %s, ignore it", actionName)
}
}
return actions, schedulerConf.Tiers, nil
}
func readSchedulerConf(confPath string) (string, error) {
dat, err := ioutil.ReadFile(confPath)
if err != nil {
return "", err
}
return string(dat), nil
}
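
For illustration, a sketch of loading a custom configuration string through the helper above (action and plugin names must match ones already registered; the conf itself is an assumption, not shipped):

// Sketch: a custom conf that also enables preemption and reclaim.
func exampleLoadConf() ([]framework.Action, []conf.Tier, error) {
	confStr := `
actions: "allocate, backfill, preempt, reclaim"
tiers:
- plugins:
  - name: priority
  - name: gang
  - name: conformance
- plugins:
  - name: drf
  - name: predicates
  - name: proportion
  - name: nodeorder
`
	// Every plugin option in the returned tiers has its defaults applied.
	return loadSchedulerConf(confStr)
}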

View File

@ -0,0 +1,94 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"container/heap"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
// PriorityQueue implements a scheduling queue.
type PriorityQueue struct {
queue priorityQueue
}
type priorityQueue struct {
items []interface{}
lessFn api.LessFn
}
// NewPriorityQueue returns a PriorityQueue
func NewPriorityQueue(lessFn api.LessFn) *PriorityQueue {
return &PriorityQueue{
queue: priorityQueue{
items: make([]interface{}, 0),
lessFn: lessFn,
},
}
}
// Push pushes element in the priority Queue
func (q *PriorityQueue) Push(it interface{}) {
heap.Push(&q.queue, it)
}
// Pop pops element in the priority Queue
func (q *PriorityQueue) Pop() interface{} {
if q.Len() == 0 {
return nil
}
return heap.Pop(&q.queue)
}
// Empty checks if the queue is empty
func (q *PriorityQueue) Empty() bool {
return q.queue.Len() == 0
}
// Len returns the length of the priority queue
func (q *PriorityQueue) Len() int {
return q.queue.Len()
}
func (pq *priorityQueue) Len() int { return len(pq.items) }
func (pq *priorityQueue) Less(i, j int) bool {
if pq.lessFn == nil {
// Fall back to index order when no lessFn is provided.
return i < j
}
// We want Pop to give us the highest, not lowest, priority so we use greater than here.
return pq.lessFn(pq.items[i], pq.items[j])
}
func (pq priorityQueue) Swap(i, j int) {
pq.items[i], pq.items[j] = pq.items[j], pq.items[i]
}
func (pq *priorityQueue) Push(x interface{}) {
(*pq).items = append((*pq).items, x)
}
func (pq *priorityQueue) Pop() interface{} {
old := (*pq).items
n := len(old)
item := old[n-1]
(*pq).items = old[0 : n-1]
return item
}
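
A minimal usage sketch (the two *api.TaskInfo values are assumed): with a lessFn that prefers higher Priority, Pop returns the higher-priority task first.

// Sketch: a queue that pops higher-priority tasks first.
func exampleQueue(low, high *api.TaskInfo) *api.TaskInfo {
	q := NewPriorityQueue(func(l, r interface{}) bool {
		return l.(*api.TaskInfo).Priority > r.(*api.TaskInfo).Priority
	})
	q.Push(low)
	q.Push(high)
	return q.Pop().(*api.TaskInfo) // the high-priority task comes out first
}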

View File

@ -0,0 +1,114 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"math/rand"
"sort"
"sync"
"github.com/golang/glog"
"k8s.io/client-go/util/workqueue"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
// PredicateNodes returns the nodes that fit the task
func PredicateNodes(task *api.TaskInfo, nodes []*api.NodeInfo, fn api.PredicateFn) []*api.NodeInfo {
var predicateNodes []*api.NodeInfo
var workerLock sync.Mutex
checkNode := func(index int) {
node := nodes[index]
glog.V(3).Infof("Considering Task <%v/%v> on node <%v>: <%v> vs. <%v>",
task.Namespace, task.Name, node.Name, task.Resreq, node.Idle)
// TODO (k82cn): Enable eCache for performance improvement.
if err := fn(task, node); err != nil {
glog.Errorf("Predicates failed for task <%s/%s> on node <%s>: %v",
task.Namespace, task.Name, node.Name, err)
return
}
workerLock.Lock()
predicateNodes = append(predicateNodes, node)
workerLock.Unlock()
}
workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), checkNode)
return predicateNodes
}
// PrioritizeNodes returns a map from score to the nodes that received that score
func PrioritizeNodes(task *api.TaskInfo, nodes []*api.NodeInfo, fn api.NodeOrderFn) map[float64][]*api.NodeInfo {
nodeScores := map[float64][]*api.NodeInfo{}
var workerLock sync.Mutex
scoreNode := func(index int) {
node := nodes[index]
score, err := fn(task, node)
if err != nil {
glog.Errorf("Error in Calculating Priority for the node:%v", err)
return
}
workerLock.Lock()
nodeScores[score] = append(nodeScores[score], node)
workerLock.Unlock()
}
workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), scoreNode)
return nodeScores
}
// SortNodes returns the nodes ordered by descending score
func SortNodes(nodeScores map[float64][]*api.NodeInfo) []*api.NodeInfo {
var nodesInorder []*api.NodeInfo
var keys []float64
for key := range nodeScores {
keys = append(keys, key)
}
sort.Sort(sort.Reverse(sort.Float64Slice(keys)))
for _, key := range keys {
nodes := nodeScores[key]
nodesInorder = append(nodesInorder, nodes...)
}
return nodesInorder
}
// SelectBestNode returns the node with the highest score, picking one at random when several nodes share it.
func SelectBestNode(nodeScores map[float64][]*api.NodeInfo) *api.NodeInfo {
var bestNodes []*api.NodeInfo
maxScore := -1.0
for score, nodes := range nodeScores {
if score > maxScore {
maxScore = score
bestNodes = nodes
}
}
return bestNodes[rand.Intn(len(bestNodes))]
}
// GetNodeList returns values of the map 'nodes'
func GetNodeList(nodes map[string]*api.NodeInfo) []*api.NodeInfo {
result := make([]*api.NodeInfo, 0, len(nodes))
for _, v := range nodes {
result = append(result, v)
}
return result
}
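
Taken together, these helpers form the usual allocate-path flow; a hedged sketch (assuming the framework package is imported here, and that Session.PredicateFn and Session.NodeOrderFn have the api.PredicateFn and api.NodeOrderFn shapes):

// Sketch: filter, score, then pick the best node for a task.
func pickNode(ssn *framework.Session, task *api.TaskInfo) *api.NodeInfo {
	nodes := GetNodeList(ssn.Nodes)
	candidates := PredicateNodes(task, nodes, ssn.PredicateFn) // drop nodes that fail predicates
	if len(candidates) == 0 {
		return nil // SelectBestNode would panic on an empty score map
	}
	scores := PrioritizeNodes(task, candidates, ssn.NodeOrderFn)
	return SelectBestNode(scores) // ties on the top score are broken randomly
}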

View File

@ -0,0 +1,163 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
"github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
)
// BuildResourceList builds a resource list object
func BuildResourceList(cpu string, memory string) v1.ResourceList {
return v1.ResourceList{
v1.ResourceCPU: resource.MustParse(cpu),
v1.ResourceMemory: resource.MustParse(memory),
api.GPUResourceName: resource.MustParse("0"),
}
}
// BuildResourceListWithGPU builds a resource list with GPU
func BuildResourceListWithGPU(cpu string, memory string, GPU string) v1.ResourceList {
return v1.ResourceList{
v1.ResourceCPU: resource.MustParse(cpu),
v1.ResourceMemory: resource.MustParse(memory),
api.GPUResourceName: resource.MustParse(GPU),
}
}
// BuildNode builds a node object
func BuildNode(name string, alloc v1.ResourceList, labels map[string]string) *v1.Node {
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Status: v1.NodeStatus{
Capacity: alloc,
Allocatable: alloc,
},
}
}
// BuildPod builds a Pod object
func BuildPod(namespace, name, nodename string, p v1.PodPhase, req v1.ResourceList, groupName string, labels map[string]string, selector map[string]string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(fmt.Sprintf("%v-%v", namespace, name)),
Name: name,
Namespace: namespace,
Labels: labels,
Annotations: map[string]string{
kbv1.GroupNameAnnotationKey: groupName,
},
},
Status: v1.PodStatus{
Phase: p,
},
Spec: v1.PodSpec{
NodeName: nodename,
NodeSelector: selector,
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: req,
},
},
},
},
}
}
// FakeBinder is used as fake binder
type FakeBinder struct {
sync.Mutex
Binds map[string]string
Channel chan string
}
// Bind is used by the fake binder to record pod bindings
func (fb *FakeBinder) Bind(p *v1.Pod, hostname string) error {
fb.Lock()
defer fb.Unlock()
key := fmt.Sprintf("%v/%v", p.Namespace, p.Name)
fb.Binds[key] = hostname
fb.Channel <- key
return nil
}
// FakeEvictor is used as fake evictor
type FakeEvictor struct {
sync.Mutex
Evicts []string
Channel chan string
}
// Evict is used by fake evictor to evict pods
func (fe *FakeEvictor) Evict(p *v1.Pod) error {
fe.Lock()
defer fe.Unlock()
fmt.Println("PodName: ", p.Name)
key := fmt.Sprintf("%v/%v", p.Namespace, p.Name)
fe.Evicts = append(fe.Evicts, key)
fe.Channel <- key
return nil
}
// FakeStatusUpdater is used for fake status update
type FakeStatusUpdater struct {
}
// UpdatePodCondition is an empty function
func (ftsu *FakeStatusUpdater) UpdatePodCondition(pod *v1.Pod, podCondition *v1.PodCondition) (*v1.Pod, error) {
// do nothing here
return nil, nil
}
// UpdatePodGroup is an empty function
func (ftsu *FakeStatusUpdater) UpdatePodGroup(pg *kbv1.PodGroup) (*kbv1.PodGroup, error) {
// do nothing here
return nil, nil
}
// FakeVolumeBinder is used as fake volume binder
type FakeVolumeBinder struct {
}
// AllocateVolumes is an empty function
func (fvb *FakeVolumeBinder) AllocateVolumes(task *api.TaskInfo, hostname string) error {
return nil
}
// BindVolumes is an empty function
func (fvb *FakeVolumeBinder) BindVolumes(task *api.TaskInfo) error {
return nil
}
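
A short sketch of these fakes in a test (names and sizes are illustrative): build a node and a pod, bind through FakeBinder, and observe the result on its channel and map.

// Sketch: exercising FakeBinder with the builders above.
func exampleBind() (string, string) {
	node := BuildNode("n1", BuildResourceList("4", "8Gi"), nil)
	pod := BuildPod("ns1", "p1", "", v1.PodPending,
		BuildResourceList("1", "1Gi"), "pg1", nil, nil)
	binder := &FakeBinder{
		Binds:   map[string]string{},
		Channel: make(chan string, 1),
	}
	_ = binder.Bind(pod, node.Name)
	key := <-binder.Channel       // "ns1/p1"
	return key, binder.Binds[key] // ("ns1/p1", "n1")
}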

View File

@ -0,0 +1,52 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
import (
"fmt"
"os"
"runtime"
)
var (
// Version shows the version of kube batch.
Version = "Not provided."
// GitSHA shows the git commit id of kube batch.
GitSHA = "Not provided."
// Built shows the built time of the binary.
Built = "Not provided."
)
// PrintVersionAndExit prints versions from the array returned by Info() and exits
func PrintVersionAndExit(apiVersion string) {
for _, i := range Info(apiVersion) {
fmt.Printf("%v\n", i)
}
os.Exit(0)
}
// Info returns an array of various service versions
func Info(apiVersion string) []string {
return []string{
fmt.Sprintf("API Version: %s", apiVersion),
fmt.Sprintf("Version: %s", Version),
fmt.Sprintf("Git SHA: %s", GitSHA),
fmt.Sprintf("Built At: %s", Built),
fmt.Sprintf("Go Version: %s", runtime.Version()),
fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH),
}
}