Compare commits: main...dragonfly-1.1.30

No commits in common. "main" and "dragonfly-1.1.30" have entirely different histories.
@@ -1,17 +0,0 @@
# Set to true to add reviewers to pull requests
addReviewers: true

# Set to true to add assignees to pull requests
addAssignees: author

# A list of reviewers to be added to pull requests (GitHub user name)
reviewers:
  - gaius-qi
  - yxxhero
  - chlins
  - CormickKneey
  - imeoer
  - BraveY

# A number of reviewers added to the pull request
numberOfReviewers: 3
@@ -1,11 +0,0 @@
name: "Auto Assign"

on:
  pull_request_target:
    types: [opened, reopened, ready_for_review]

jobs:
  add-assignee:
    runs-on: ubuntu-latest
    steps:
      - uses: kentaro-m/auto-assign-action@9f6dbe84a80c6e7639d1b9698048b201052a2a94
@@ -16,7 +16,7 @@ jobs:
        uses: actions/checkout@v4

      - id: filter
        uses: dorny/paths-filter@v3.0.2
        uses: dorny/paths-filter@v2.11.1
        with:
          filters: |
            charts:

@@ -37,16 +37,16 @@ jobs:
        run: git fetch --prune --unshallow

      - name: Set up Helm
        uses: azure/setup-helm@v4
        uses: azure/setup-helm@v3
        with:
          version: v3.7.2

      - uses: actions/setup-python@v5
        with:
          python-version: 3.9
          python-version: 3.7

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.7.0
        uses: helm/chart-testing-action@v2.6.1

      - name: Run chart-testing (list-changed)
        id: list-changed

@@ -60,17 +60,14 @@ jobs:
        run: ct lint --config ./.github/ct.yaml

      - name: Create kind cluster
        uses: helm/kind-action@v1.12.0
        uses: helm/kind-action@v1.8.0
        if: steps.list-changed.outputs.changed == 'true'

      - name: Add bitnami chart repos
        run: helm repo add bitnami https://charts.bitnami.com/bitnami

      - name: Add dragonfly chart repos
        run: helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/

      - name: Add nydus-snapshotter chart repos
        run: helm repo add nydus-snapshotter https://dragonflyoss.github.io/helm-charts/
      - name: Add jaegertracing chart repos
        run: helm repo add jaegertracing https://jaegertracing.github.io/helm-charts

      - name: Run chart-testing (install)
        run: ct install --config ./.github/ct.yaml
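The lint and install steps above both read `./.github/ct.yaml`, which is not included in this compare. For orientation, a minimal chart-testing configuration of the kind this workflow expects could look like the sketch below; the keys are standard `ct` options, but the values are illustrative and not the repository's actual file.

```yaml
# Illustrative ct.yaml sketch for the chart-testing runs above (not the repo's real config).
target-branch: main
chart-dirs:
  - charts
chart-repos:
  - bitnami=https://charts.bitnami.com/bitnami
  - jaegertracing=https://jaegertracing.github.io/helm-charts
helm-extra-args: --timeout 600s
```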
@@ -1,20 +0,0 @@
name: PR Label

on:
  pull_request:
    types: [opened, labeled, unlabeled, synchronize]

permissions:
  contents: read

jobs:
  classify:
    name: Classify PR
    runs-on: ubuntu-latest
    steps:
      - name: PR impact specified
        uses: mheap/github-action-required-labels@8afbe8ae6ab7647d0c9f0cfa7c2f939650d22509 # v5.5
        with:
          mode: exactly
          count: 1
          labels: 'bug, enhancement, documentation, dependencies'
@@ -5,7 +5,6 @@ on:
    branches:
      - main
    paths:
      - charts/dragonfly-stack/Chart.yaml
      - charts/dragonfly/Chart.yaml
      - charts/nydus-snapshotter/Chart.yaml

@@ -24,6 +23,6 @@ jobs:
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.7.0
        uses: helm/chart-releaser-action@v1.6.0
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
@@ -1,31 +0,0 @@
name: Close stale issues and PRs

on:
  workflow_dispatch:
  schedule:
    - cron: "0 0 * * *"

permissions:
  issues: write
  pull-requests: write

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
        id: stale
        with:
          delete-branch: true
          days-before-close: 7
          days-before-stale: 90
          days-before-pr-close: 7
          days-before-pr-stale: 120
          stale-issue-label: "stale"
          exempt-issue-labels: bug,wip,on-hold
          exempt-pr-labels: bug,wip,on-hold
          exempt-all-milestones: true
          stale-issue-message: 'This issue is stale because it has been open 90 days with no activity.'
          close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.'
          stale-pr-message: 'This PR is stale because it has been open 120 days with no activity.'
          close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.'
INSTALL.md
@@ -22,7 +22,7 @@ This document will help you experience how to use [Dragonfly](https://d7y.io) &
Download containerd configuration for kind.

```shell
curl -fsSL -o config.toml https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/test/testdata/containerd/config.toml
curl -fsSL -o config.toml https://raw.githubusercontent.com/dragonflyoss/Dragonfly2/main/test/testdata/containerd/config.toml
```

Create kind cluster configuration file `kind-config.yaml`, configuration content is as follows:

@@ -37,8 +37,8 @@ nodes:
- role: control-plane
  image: kindest/node:v1.23.17
  extraPortMappings:
    - containerPort: 4001
      hostPort: 4001
    - containerPort: 65001
      hostPort: 65001
      protocol: TCP
  extraMounts:
    - hostPath: ./config.toml
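The `extraMounts` entry above is cut off after the `hostPath` line in this compare. A kind mount of this kind normally also carries a `containerPath`; the sketch below is an assumed completion only, with the container path guessed as containerd's default config location rather than taken from the diff.

```yaml
# Assumed shape of the truncated extraMounts entry above (containerPath is a guess, not from the source).
extraMounts:
  - hostPath: ./config.toml
    containerPath: /etc/containerd/config.toml
```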
@@ -83,7 +83,7 @@ Install Dragonfly using the configuration:

```shell
$ helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set client.config.proxy.prefetch=true,seedClient.config.proxy.prefetch=true
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set dfdaemon.config.download.prefetch=true,seedPeer.config.download.prefetch=true
NAME: dragonfly
LAST DEPLOYED: Fri Apr 7 10:35:12 2023
NAMESPACE: dragonfly-system
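The two `helm install` lines above differ only in the prefetch flags: the main-branch chart targets the Rust client values (`client.*`/`seedClient.*`), while the dragonfly-1.1.30 chart targets the older dfdaemon values (`dfdaemon.*`/`seedPeer.*`). A minimal values-file sketch of the same overrides, assuming chart defaults everywhere else, is shown below; passing it with `-f` should be equivalent to the corresponding `--set` string.

```yaml
# values-prefetch.yaml -- illustrative equivalent of the --set flags above.

# main branch chart (client/seedClient):
client:
  config:
    proxy:
      prefetch: true
seedClient:
  config:
    proxy:
      prefetch: true

# dragonfly-1.1.30 chart would instead use:
# dfdaemon:
#   config:
#     download:
#       prefetch: true
# seedPeer:
#   config:
#     download:
#       prefetch: true
```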
@@ -97,7 +97,12 @@ NOTES:
  kubectl --namespace dragonfly-system port-forward $SCHEDULER_POD_NAME 8002:$SCHEDULER_CONTAINER_PORT
  echo "Visit http://127.0.0.1:8002 to use your scheduler"

2. Configure runtime to use dragonfly:
2. Get the dfdaemon port by running these commands:
  export DFDAEMON_POD_NAME=$(kubectl get pods --namespace dragonfly-system -l "app=dragonfly,release=dragonfly,component=dfdaemon" -o jsonpath={.items[0].metadata.name})
  export DFDAEMON_CONTAINER_PORT=$(kubectl get pod --namespace dragonfly-system $DFDAEMON_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  You can use $DFDAEMON_CONTAINER_PORT as a proxy port in Node.

3. Configure runtime to use dragonfly:
  https://d7y.io/docs/getting-started/quick-start/kubernetes/
```

@@ -107,7 +112,7 @@ Check that Dragonfly is deployed successfully:

```shell
$ kubectl wait po --all -n dragonfly-system --for=condition=ready --timeout=10m
pod/dragonfly-client-gs924 condition met
pod/dragonfly-dfdaemon-gs924 condition met
pod/dragonfly-manager-5d97fd88fb-txnw9 condition met
pod/dragonfly-manager-5d97fd88fb-v2nmh condition met
pod/dragonfly-manager-5d97fd88fb-xg6wr condition met

@@ -119,9 +124,9 @@ pod/dragonfly-redis-replicas-2 condition met
pod/dragonfly-scheduler-0 condition met
pod/dragonfly-scheduler-1 condition met
pod/dragonfly-scheduler-2 condition met
pod/dragonfly-seed-client-0 condition met
pod/dragonfly-seed-client-1 condition met
pod/dragonfly-seed-client-2 condition met
pod/dragonfly-seed-peer-0 condition met
pod/dragonfly-seed-peer-1 condition met
pod/dragonfly-seed-peer-2 condition met
```

## Install Nydus based on Helm Charts

@@ -132,7 +137,7 @@ Install Nydus using the default configuration, for more information about mirror
<!-- markdownlint-disable -->

```shell
$ curl -fsSL -o config-nydus.yaml https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/test/testdata/charts/config-nydus.yaml
$ curl -fsSL -o config-nydus.yaml https://raw.githubusercontent.com/dragonflyoss/Dragonfly2/main/test/testdata/charts/config-nydus.yaml
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace nydus-snapshotter nydus-snapshotter dragonfly/nydus-snapshotter -f config-nydus.yaml
NAME: nydus-snapshotter
LAST DEPLOYED: Fri Apr 7 10:40:50 2023
@@ -210,9 +215,15 @@ Verify downloaded Nydus image via Dragonfly based on mirror mode:
<!-- markdownlint-disable -->

```shell
$ CLIENT_POD_NAME=`kubectl -n dragonfly-system get pod -l component=client --no-headers -o custom-columns=NAME:metadata.name`
$ kubectl -n dragonfly-system exec -it ${CLIENT_POD_NAME} -- sh -c 'grep "download task succeeded" /var/log/dragonfly/dfdaemon/dfdaemon.log'
2024-05-28T12:36:24.861903Z INFO download_task: dragonfly-client/src/grpc/dfdaemon_download.rs:276: download task succeeded host_id="127.0.0.1-kind-worker" task_id="4535f073321f0d1908b8c3ad63a1d59324573c0083961c5bcb7f38ac72ad598d" peer_id="127.0.0.1-kind-worker-13095fb5-786a-4908-b8c1-744be144b383"
$ DFDAEMON_POD_NAME=`kubectl -n dragonfly-system get pod -l component=dfdaemon --no-headers -o custom-columns=NAME:metadata.name`
$ kubectl -n dragonfly-system exec -it ${DFDAEMON_POD_NAME} -- sh -c 'grep "peer task done" /var/log/dragonfly/daemon/core.log'
{"level":"info","ts":"2023-04-10 07:30:57.596","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 1116ms","peer":"10.244.0.5-1-53419631-8a14-4325-b5f2-c4ef01a02853","task":"d6a7aaa926dccd3376f91378f58d3a1a0871302d0afee718fd991a6849b422a7","component":"PeerTask","trace":"977c114a06b6d3a12fc680b28b57a43d"}
{"level":"info","ts":"2023-04-10 07:30:58.594","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 2116ms","peer":"10.244.0.5-1-4c45ed29-4931-4cfc-a8e7-ba06a7575518","task":"984629e0ba47eeccd65ffea34d1369d71bb821169c83918795cceb4e9774d3eb","component":"PeerTask","trace":"e9249680e787c9a13935aee1b280665a"}
{"level":"info","ts":"2023-04-10 07:30:58.598","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 2133ms","peer":"10.244.0.5-1-45e3cd5b-cac6-43f0-be82-398cab978e83","task":"571f792ad3e2b12cc28407f8f14d17a44925e0151aff947773bdac5bec64b8d6","component":"PeerTask","trace":"f4e79e09ac293603875b9542c9b24bb4"}
{"level":"info","ts":"2023-04-10 07:30:58.905","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 2377ms","peer":"10.244.0.5-1-6d51916a-13cb-4e50-8ba0-886e786e32eb","task":"023b961410d8776250215268f3569fa4ccb01bf1c557ca0e73888c4dd8c23ace","component":"PeerTask","trace":"285f5ecf084873e4311526136438d571"}
{"level":"info","ts":"2023-04-10 07:30:59.452","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 538ms","peer":"10.244.0.5-1-b7b2235f-4b0f-4253-8a1f-cdf7bd86f096","task":"23dee111679d459440e4839200940534037f1ba101bd7b7af57c9b7123f96882","component":"PeerTask","trace":"63d5147c7bd01455ce3c537f18463b12"}
{"level":"info","ts":"2023-04-10 07:31:01.722","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 4123ms","peer":"10.244.0.5-1-0dbbfe12-df46-4e3b-98dc-fa6c8f2a514c","task":"15c51bc09cf57b4c5c1c04e9cbdf17fa4560c6ad10b5b32680f0b8cd63bb900b","component":"PeerTask","trace":"b9bcac5bfe5d1f1871db22911d7d71b5"}
{"level":"info","ts":"2023-04-10 07:31:02.897","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 6483ms","peer":"10.244.0.5-1-be485ea5-6d54-4f56-8f56-bdbe76ec8469","task":"0fe34e3fcb64d49b09fe7c759f47a373b7590fe4dbe1da6d9c732eee516e4cb4","component":"PeerTask","trace":"daa2ffd1021779dfbd3162ead765e0ba"}
```

<!-- markdownlint-restore -->
@@ -235,7 +246,7 @@ Create Dragonfly configuration file `d7y-config.yaml`, configuration content is

```shell
cat <<EOF > d7y-config.yaml
seedClient:
seedPeer:
  persistence:
    storageClass: "alicloud-disk-essd"
    size: 20Gi

@@ -266,7 +277,7 @@ Install Dragonfly using the params:

```shell
$ helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set client.config.proxy.prefetch=true,seedClient.config.proxy.prefetch=true -f d7y-config.yaml
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set dfdaemon.config.download.prefetch=true,seedPeer.config.download.prefetch=true -f d7y-config.yaml
NAME: dragonfly
LAST DEPLOYED: Fri Apr 7 10:35:12 2023
NAMESPACE: dragonfly-system
@@ -280,7 +291,12 @@ NOTES:
  kubectl --namespace dragonfly-system port-forward $SCHEDULER_POD_NAME 8002:$SCHEDULER_CONTAINER_PORT
  echo "Visit http://127.0.0.1:8002 to use your scheduler"

2. Configure runtime to use dragonfly:
2. Get the dfdaemon port by running these commands:
  export DFDAEMON_POD_NAME=$(kubectl get pods --namespace dragonfly-system -l "app=dragonfly,release=dragonfly,component=dfdaemon" -o jsonpath={.items[0].metadata.name})
  export DFDAEMON_CONTAINER_PORT=$(kubectl get pod --namespace dragonfly-system $DFDAEMON_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  You can use $DFDAEMON_CONTAINER_PORT as a proxy port in Node.

3. Configure runtime to use dragonfly:
  https://d7y.io/docs/getting-started/quick-start/kubernetes/
```

@@ -290,7 +306,7 @@ Check that Dragonfly is deployed successfully:

```shell
$ kubectl wait po --all -n dragonfly-system --for=condition=ready --timeout=10m
pod/dragonfly-client-gs924 condition met
pod/dragonfly-dfdaemon-gs924 condition met
pod/dragonfly-manager-5d97fd88fb-txnw9 condition met
pod/dragonfly-manager-5d97fd88fb-v2nmh condition met
pod/dragonfly-manager-5d97fd88fb-xg6wr condition met

@@ -302,7 +318,7 @@ pod/dragonfly-redis-replicas-2 condition met
pod/dragonfly-scheduler-0 condition met
pod/dragonfly-scheduler-1 condition met
pod/dragonfly-scheduler-2 condition met
pod/dragonfly-seed-client-0 condition met
pod/dragonfly-seed-client-1 condition met
pod/dragonfly-seed-client-2 condition met
pod/dragonfly-seed-peer-0 condition met
pod/dragonfly-seed-peer-1 condition met
pod/dragonfly-seed-peer-2 condition met
```
@@ -1,6 +1,5 @@
# Dragonfly Community Helm Charts

[](https://artifacthub.io/packages/helm/dragonfly/dragonfly-stack)
[](https://artifacthub.io/packages/helm/dragonfly/dragonfly)
[](https://artifacthub.io/packages/helm/dragonfly/nydus-snapshotter)

@@ -21,19 +20,18 @@ Please refer to the [document][install] to install Dragonfly & Nydus on Kubernet

## Documentation

- [Install Dragonfly Stack on Kubernetes](./charts/dragonfly-stack/README.md)
- [Install Dragonfly & Nydus on Kubernetes][install]
- [Install Dragonfly on Kubernetes](./charts/dragonfly/README.md)
- [Install Nydus on Kubernetes](./charts/nydus-snapshotter/README.md)
- [Install Dragonfly & Nydus on Kubernetes][install]

## Community

Join the conversation and help the community.

- **Slack Channel**: [#dragonfly](https://cloud-native.slack.com/messages/dragonfly/) on [CNCF Slack](https://slack.cncf.io/)
- **Github Discussions**: [Dragonfly Discussion Forum](https://github.com/dragonflyoss/dragonfly/discussions)
- **Discussion Group**: <dragonfly-discuss@googlegroups.com>
- **Developer Group**: <dragonfly-developers@googlegroups.com>
- **Maintainer Group**: <dragonfly-maintainers@googlegroups.com>
- **Github Discussions**: [Dragonfly Discussion Forum][discussion]
- **Twitter**: [@dragonfly_oss](https://twitter.com/dragonfly_oss)
- **DingTalk**: [22880028764](https://qr.dingtalk.com/action/joingroup?code=v1,k1,pkV9IbsSyDusFQdByPSK3HfCG61ZCLeb8b/lpQ3uUqI=&_dt_no_comment=1&origin=11)
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,54 +0,0 @@
apiVersion: v2
name: dragonfly-stack
description: Collects Dragonfly component and Nydus component into a single chart to provide a complete solution for the Dragonfly stack.
icon: https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/docs/images/logo/dragonfly.svg
type: application
version: 0.1.2
appVersion: 2.1.49
keywords:
  - dragonfly-stack
  - dragonfly
  - nydus
  - d7y
  - P2P
  - image

maintainers:
  - name: gaius-qi
    email: gaius.qi@gmail.com
  - name: imeoer
    email: imeoer@gmail.com
  - name: adamqqqplay
    email: adamqqq@163.com

home: https://d7y.io/

sources:
  - https://github.com/dragonflyoss/dragonfly
  - https://github.com/dragonflyoss/client
  - https://github.com/dragonflyoss/image-service
  - https://github.com/containerd/nydus-snapshotter/

annotations:
  artifacthub.io/changes: |
    - Rename repo Dragonfly2 to dragonfly.

  artifacthub.io/links: |
    - name: Chart Source
      url: https://github.com/dragonflyoss/helm-charts
    - name: Source
      url: https://github.com/dragonflyoss/dragonfly
    - name: Source
      url: https://github.com/dragonflyoss/client
    - name: Source
      url: https://github.com/containerd/nydus-snapshotter

dependencies:
  - name: dragonfly
    version: 1.1.67
    repository: https://dragonflyoss.github.io/helm-charts/
    condition: dragonfly.enable
  - name: nydus-snapshotter
    version: 0.0.10
    repository: https://dragonflyoss.github.io/helm-charts/
    condition: nydus-snapshotter.enable

File diff suppressed because one or more lines are too long
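The `condition` fields in the dependency block above gate each subchart on a value of the same name. A minimal values sketch for the umbrella chart that pulls in only the Dragonfly subchart (keys taken directly from the dependency block; values illustrative):

```yaml
# values.yaml for the dragonfly-stack umbrella chart (illustrative).
dragonfly:
  enable: true            # matches condition: dragonfly.enable
nydus-snapshotter:
  enable: false           # matches condition: nydus-snapshotter.enable
```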
@@ -1,133 +0,0 @@
# Dragonfly Stack Helm Chart

[](https://artifacthub.io/packages/search?repo=dragonfly-stack)

Collects Dragonfly component and Nydus component into a single chart to provide a complete solution for the Dragonfly stack.

## TL;DR

```shell
helm repo add dragonfly-stack https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly-stack
```

## Introduction

Dragonfly is an open source intelligent P2P based image and file distribution system. Its goal is to tackle all distribution problems in cloud native scenarios. Currently Dragonfly focuses on being:

- Simple: well-defined user-facing API (HTTP), non-invasive to all container engines;
- Efficient: Seed peer support, P2P based file distribution to save enterprise bandwidth;
- Intelligent: host level speed limit, intelligent flow control due to host detection;
- Secure: block transmission encryption, HTTPS connection support.

Dragonfly is now hosted by the Cloud Native Computing Foundation (CNCF) as an Incubating Level Project. Originally it was born to solve all kinds of distribution at very large scales, such as application distribution, cache distribution, log distribution, image distribution, and so on.

## Prerequisites

- Kubernetes cluster 1.20+
- Helm v3.8.0+

## Installation Guide

When using Dragonfly in Kubernetes, a container runtime must be configured. This can be done by the init script in this chart.

More detail about installation is available in [Kubernetes with Dragonfly](https://d7y.io/docs/getting-started/quick-start/kubernetes/).

We recommend reading the details of [Kubernetes with Dragonfly](https://d7y.io/docs/getting-started/quick-start/kubernetes/) before installing.

> **We do not recommend using Dragonfly with Docker in Kubernetes** for two reasons: 1. there is no fallback image pulling policy; 2. Docker support is deprecated in Kubernetes.

## Installation

### Install with custom configuration

Create the `values.yaml` configuration file. It is recommended to use external Redis and MySQL instead of containers. This example uses external MySQL and Redis.

```yaml
dragonfly:
  mysql:
    enable: false

  externalMysql:
    migrate: true
    host: mysql-host
    username: dragonfly
    password: dragonfly
    database: manager
    port: 3306

  redis:
    enable: false

  externalRedis:
    addrs:
      - redis.example.com:6379
    password: dragonfly
```

Install dragonfly-stack chart with release name `dragonfly`:

```shell
helm repo add dragonfly-stack https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly-stack -f values.yaml
```

### Install with an existing manager

Create the `values.yaml` configuration file. You need to configure the cluster ID associated with the scheduler and seed peer. This example deploys a cluster using an existing manager and Redis.

```yaml
dragonfly:
  scheduler:
    config:
      manager:
        schedulerClusterID: 1

  seedClient:
    config:
      seedPeer:
        enable: true
        type: super
        clusterID: 1

  manager:
    enable: false

  externalManager:
    enable: true
    host: "dragonfly-manager.dragonfly-system.svc.cluster.local"
    restPort: 8080
    grpcPort: 65003

  redis:
    enable: false

  externalRedis:
    addrs:
      - redis.example.com:6379
    password: dragonfly

  mysql:
    enable: false
```

Install dragonfly-stack chart with release name `dragonfly`:

```shell
helm repo add dragonfly-stack https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly-stack -f values.yaml
```

## Uninstall

Uninstall the `dragonfly` deployment:

```shell
helm delete dragonfly --namespace dragonfly-system
```

{{ template "chart.valuesSection" . }}

## Chart dependencies

{{ template "chart.requirementsTable" . }}
File diff suppressed because it is too large
@@ -1,10 +1,10 @@
apiVersion: v2
name: dragonfly
description: Dragonfly is an intelligent P2P based image and file distribution system
icon: https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/docs/images/logo/dragonfly.svg
icon: https://raw.githubusercontent.com/dragonflyoss/Dragonfly2/main/docs/images/logo/dragonfly.svg
type: application
version: 1.4.4
appVersion: 2.3.1-rc.2
version: 1.1.30
appVersion: 2.1.30
keywords:
  - dragonfly
  - d7y
@@ -12,49 +12,53 @@ keywords:
  - image

maintainers:
  - name: jim3ma
    email: majinjing3@gmail.com
  - name: gaius-qi
    email: gaius.qi@gmail.com
  - name: yxxhero
    email: aiopsclub@163.com
  - name: jim3ma
    email: majinjing3@gmail.com

home: https://d7y.io/

sources:
  - https://github.com/dragonflyoss/dragonfly
  - https://github.com/dragonflyoss/client
  - https://github.com/dragonflyoss/Dragonfly2

annotations:
  artifacthub.io/changes: |
    - Bump Dragonfly to v2.3.1-rc.2.
    - Bump Client to v1.0.9.
    - Optimize scheduler default config.

  artifacthub.io/links: |
    - name: Chart Source
      url: https://github.com/dragonflyoss/helm-charts
    - name: Source
      url: https://github.com/dragonflyoss/dragonfly
    - name: Source
      url: https://github.com/dragonflyoss/client
      url: https://github.com/dragonflyoss/Dragonfly2
  artifacthub.io/images: |
    - name: manager
      image: dragonflyoss/manager:v2.3.1-rc.2
      image: dragonflyoss/manager:v2.1.30
    - name: scheduler
      image: dragonflyoss/scheduler:v2.3.1-rc.2
      image: dragonflyoss/scheduler:v2.1.30
    - name: client
      image: dragonflyoss/client:v1.0.9
      image: dragonflyoss/client:v0.1.13
    - name: seed-client
      image: dragonflyoss/client:v1.0.9
    - name: dfinit
      image: dragonflyoss/dfinit:v1.0.9
      image: dragonflyoss/client:v0.1.13
    - name: dfdaemon
      image: dragonflyoss/dfdaemon:v2.1.30
    - name: trainer
      image: dragonflyoss/scheduler:v2.1.30
    - name: triton
      image: nvcr.io/nvidia/tritonserver:23.06-py3

dependencies:
  - name: mysql
    version: 10.1.1
    version: 9.4.6
    repository: https://charts.bitnami.com/bitnami
    condition: mysql.enable
  - name: redis
    version: 19.5.5
    version: 17.4.3
    repository: https://charts.bitnami.com/bitnami
    condition: redis.enable
  - name: jaeger
    version: 0.66.1
    repository: https://jaegertracing.github.io/helm-charts
    condition: jaeger.enable
File diff suppressed because one or more lines are too long
@@ -81,12 +81,13 @@ scheduler:
manager:
schedulerClusterID: 1

seedClient:
seedPeer:
config:
seedPeer:
enable: true
type: super
clusterID: 1
scheduler:
manager:
seedPeer:
enable: true
clusterID: 1

manager:
enable: false
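The interleaved lines above come from two different layouts of the same seed-peer setting, and the flattened rendering loses their nesting. The main-branch shape is the one also documented in the dragonfly-stack README earlier in this compare and looks roughly like the sketch below; the 1.1.30 chart keeps the equivalent toggle under its `seedPeer` component instead (its exact nesting is not fully recoverable here, so treat this as a sketch).

```yaml
# main branch chart: seed peer settings live under seedClient.config.seedPeer
seedClient:
  config:
    seedPeer:
      enable: true
      type: super
      clusterID: 1
```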
@@ -1,14 +1,20 @@
1. Get the manager address by running these commands:
  export MANAGER_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "dragonfly.fullname" . }},release={{ .Release.Name }},component=manager" -o jsonpath={.items[0].metadata.name})
  export MANAGER_CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $MANAGER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  kubectl --namespace {{ .Release.Namespace }} port-forward $MANAGER_POD_NAME 8080:$MANAGER_CONTAINER_PORT
  echo "Visit http://127.0.0.1:8080 to use your manager"

2. Get the scheduler address by running these commands:
1. Get the scheduler address by running these commands:
  export SCHEDULER_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "dragonfly.fullname" . }},release={{ .Release.Name }},component=scheduler" -o jsonpath={.items[0].metadata.name})
  export SCHEDULER_CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $SCHEDULER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  kubectl --namespace {{ .Release.Namespace }} port-forward $SCHEDULER_POD_NAME 8002:$SCHEDULER_CONTAINER_PORT
  echo "Visit http://127.0.0.1:8002 to use your scheduler"

2. Get the dfdaemon port by running these commands:
  export DFDAEMON_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "dragonfly.fullname" . }},release={{ .Release.Name }},component=dfdaemon" -o jsonpath={.items[0].metadata.name})
  export DFDAEMON_CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $DFDAEMON_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  You can use $DFDAEMON_CONTAINER_PORT as a proxy port in Node.

3. Configure runtime to use dragonfly:
  https://d7y.io/docs/getting-started/quick-start/kubernetes/

{{ if .Values.jaeger.enable }}
4. Get Jaeger query URL by running these commands:
  export JAEGER_QUERY_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services {{ .Release.Name }}-jaeger-query -o jsonpath="{.spec.ports[0].port}")
  kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ .Release.Name }}-jaeger-query 16686:$JAEGER_QUERY_PORT
  echo "Visit http://127.0.0.1:16686/search?limit=20&lookback=1h&maxDuration&minDuration&service=dragonfly to query download events"
{{- end }}
@@ -6,13 +6,6 @@ Expand the name of the chart.
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
*/}}
{{- define "common.names.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).

@@ -63,122 +56,33 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- end -}}

{{/*
Create a default fully qualified dfinit name.
Create a default fully qualified seed peer name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dragonfly.dfinit.fullname" -}}
{{ template "dragonfly.fullname" . }}-dfinit
{{- end -}}

{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper image name
{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
*/}}
{{- define "common.images.image" -}}
{{- $registryName := .imageRoot.registry -}}
{{- $repositoryName := .imageRoot.repository -}}
{{- $separator := ":" -}}
{{- $termination := .imageRoot.tag | toString -}}
{{- if .global }}
{{- if .global.imageRegistry }}
{{- $registryName = .global.imageRegistry -}}
{{- end -}}
{{- end -}}
{{- if .imageRoot.digest }}
{{- $separator = "@" -}}
{{- $termination = .imageRoot.digest | toString -}}
{{- end -}}
{{- if $registryName }}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- else -}}
{{- printf "%s%s%s" $repositoryName $separator $termination -}}
{{- end -}}
{{- define "dragonfly.seedPeer.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.seedPeer.name }}
{{- end -}}

{{/*
Return the proper image name (for the manager image)
Create a default fully qualified dfdaemon name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "manager.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.manager.image "global" .Values.global ) -}}
{{- define "dragonfly.dfdaemon.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.dfdaemon.name }}
{{- end -}}

{{/*
Return the proper image name (for the scheduler image)
Create a default fully qualified trainer name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "scheduler.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.scheduler.image "global" .Values.global ) -}}
{{- define "dragonfly.trainer.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.trainer.name }}
{{- end -}}

{{/*
Return the proper image name (for the client image)
Create a default fully qualified triton name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "client.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.client.image "global" .Values.global ) -}}
{{- end -}}

{{/*
Return the proper image name (for the seedClient image)
*/}}
{{- define "seedClient.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.seedClient.image "global" .Values.global ) -}}
{{- end -}}

{{/*
Return the proper image name (for the client dfinit image)
*/}}
{{- define "client.dfinit.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.client.dfinit.image "global" .Values.global ) -}}
{{- end -}}

{{/*
Return the proper image name (for the manager initContainer image)
*/}}
{{- define "manager.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.manager.initContainer.image "global" .Values.global ) -}}
{{- end -}}

{{/*
Return the proper image name (for the scheduler initContainer image)
*/}}
{{- define "scheduler.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.scheduler.initContainer.image "global" .Values.global ) -}}
{{- end -}}

{{/*
Return the proper image name (for the client initContainer image)
*/}}
{{- define "client.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.client.initContainer.image "global" .Values.global ) -}}
{{- end -}}

{{/*
Return the proper image name (for the seedClient initContainer image)
*/}}
{{- define "seedClient.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.seedClient.initContainer.image "global" .Values.global ) -}}
{{- end -}}

{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper Storage Class
{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
*/}}
{{- define "common.storage.class" -}}

{{- $storageClass := .persistence.storageClass -}}
{{- if .global -}}
{{- if .global.storageClass -}}
{{- $storageClass = .global.storageClass -}}
{{- end -}}
{{- end -}}

{{- if $storageClass -}}
{{- if (eq "-" $storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" $storageClass -}}
{{- end -}}
{{- end -}}

{{- define "dragonfly.triton.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.triton.name }}
{{- end -}}
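The `common.images.image` helper introduced above assembles an image reference from a registry/repository plus a tag or digest. A values sketch of the inputs it consumes, with the rendered result noted in comments (values here are illustrative, not defaults from the chart):

```yaml
# Illustrative input for the common.images.image helper shown above.
client:
  image:
    registry: docker.io
    repository: dragonflyoss/client
    tag: v1.0.9
    # digest: sha256:...    # if set, the helper switches the separator from ":" to "@"

# With a registry set, the helper renders:    docker.io/dragonflyoss/client:v1.0.9
# Without a registry, it renders:             dragonflyoss/client:v1.0.9
```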
@@ -3,9 +3,9 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.client.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}

@@ -20,12 +20,13 @@ data:
upload:
{{ toYaml .Values.client.config.upload | indent 6 }}
manager:
addrs:
{{- if .Values.client.config.manager.addrs }}
addr: {{ .Values.client.config.manager.addr }}
{{ toYaml .Values.client.config.manager.netAddrs | indent 6 }}
{{- else if .Values.manager.enable }}
addr: http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
- http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
{{- else }}
addr: http://{{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
- {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
{{- end }}
scheduler:
{{ toYaml .Values.client.config.scheduler | indent 6 }}
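Rendered against an in-cluster manager, the two template variants above produce differently shaped client config: the main branch emits a list under `addrs`, the 1.1.30 template a single `addr` string. The hostname and port below follow the naming used elsewhere in this compare and are shown purely for illustration.

```yaml
# main branch client config (list form, illustrative rendering)
manager:
  addrs:
    - http://dragonfly-manager.dragonfly-system.svc.cluster.local:65003

# dragonfly-1.1.30 client config (single-address form)
# manager:
#   addr: http://dragonfly-manager.dragonfly-system.svc.cluster.local:65003
```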
@@ -37,12 +38,14 @@ data:
{{ toYaml .Values.client.config.gc | indent 6 }}
proxy:
{{ toYaml .Values.client.config.proxy | indent 6 }}
health:
{{ toYaml .Values.client.config.health | indent 6 }}
security:
{{ toYaml .Values.client.config.security | indent 6 }}
metrics:
{{ toYaml .Values.client.config.metrics | indent 6 }}
stats:
{{ toYaml .Values.client.config.stats | indent 6 }}
{{- if .Values.client.config.tracing }}
tracing:
{{ toYaml .Values.client.config.tracing | indent 6 }}
{{- else if .Values.jaeger.enable }}
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
{{- end }}
{{- end }}
@@ -3,9 +3,9 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ template "dragonfly.client.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}

@@ -22,33 +22,31 @@ spec:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
component: "{{ .Values.client.name }}"
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
component: "{{ .Values.client.name }}"
release: {{ .Release.Name }}
{{- if .Values.client.podLabels }}
{{ toYaml .Values.client.podLabels | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/client/client-configmap.yaml") . | sha256sum }}
{{- if .Values.client.dfinit.enable }}
checksum/dfinit-config: {{ include (print $.Template.BasePath "/client/dfinit-configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.client.podAnnotations }}
{{ toYaml .Values.client.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.client.nodeSelector }}
nodeSelector:
{{ toYaml .Values.client.nodeSelector | indent 8 }}
{{- end }}
hostNetwork: {{ .Values.client.hostNetwork }}
{{- if .Values.client.hostNetwork }}
dnsPolicy: "ClusterFirstWithHostNet"
{{- end }}
hostPID: {{ .Values.client.hostPID }}
hostIPC: {{ .Values.client.hostIPC }}
{{- with .Values.client.nodeSelector | default .Values.global.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.client.tolerations }}
tolerations:
{{ toYaml .Values.client.tolerations | indent 8 }}

@@ -60,10 +58,10 @@ spec:
{{- if quote .Values.client.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.client.terminationGracePeriodSeconds }}
{{- end }}
{{- if .Values.client.priorityClassName }}
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.client.priorityClassName) }}
priorityClassName: {{ .Values.client.priorityClassName }}
{{- end }}
{{- with .Values.client.image.pullSecrets | default .Values.global.imagePullSecrets }}
{{- with .Values.client.pullSecrets | default .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -74,81 +72,21 @@ spec:
initContainers:
{{- if .Values.scheduler.enable }}
- name: wait-for-scheduler
image: {{ template "client.initContainer.image" . }}
imagePullPolicy: {{ .Values.client.initContainer.image.pullPolicy }}
image: {{ .Values.client.initContainer.image }}:{{ .Values.client.initContainer.tag }}
imagePullPolicy: {{ .Values.client.initContainer.pullPolicy }}
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.scheduler.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.scheduler.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.scheduler.config.server.port }}; do echo waiting for scheduler; sleep 2; done;']
{{- end }}
{{- if .Values.client.dfinit.enable }}
- name: dfinit
image: {{ template "client.dfinit.image" . }}
imagePullPolicy: {{ .Values.client.dfinit.image.pullPolicy }}
args:
- --log-level={{ .Values.client.dfinit.config.log.level }}
{{- if .Values.client.dfinit.config.console }}
- --console
{{- end }}
resources:
{{ toYaml .Values.client.initContainer.resources | indent 10 }}
volumeMounts:
- name: dfinit-config
mountPath: "/etc/dragonfly"
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.containerd) }}
- name: containerd-config-dir
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.containerd.configPath }}
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.crio) }}
- name: crio-config-dir
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.crio.configPath }}
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.podman) }}
- name: podman-config-dir
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.podman.configPath }}
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.docker) }}
- name: docker-config-dir
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.docker.configPath }}
{{- end }}
{{- end }}
{{- if and .Values.client.dfinit.enable .Values.client.dfinit.restartContainerRuntime }}
- name: restart-container-runtime
image: {{ template "client.initContainer.image" . }}
imagePullPolicy: {{ .Values.client.initContainer.image.pullPolicy }}
securityContext:
# nsenter need privilege permission.
privileged: true
command:
- /bin/sh
- -cx
- |-
{{- if .Values.client.dfinit.config.containerRuntime.containerd }}
nsenter -t 1 -m -- systemctl restart containerd.service
echo "restart container"
{{- else if .Values.client.dfinit.config.containerRuntime.crio }}
nsenter -t 1 -m -- systemctl restart crio.service
echo "restart cri-o"
{{- else if .Values.client.dfinit.config.containerRuntime.podman }}
nsenter -t 1 -m -- systemctl restart podman.service
echo "restart podman"
{{- else if .Values.client.dfinit.config.containerRuntime.docker }}
nsenter -t 1 -m -- systemctl restart docker.service
echo "restart docker"
{{- else }}
echo "no container runtime to restart"
{{- end }}
resources:
{{ toYaml .Values.client.initContainer.resources | indent 10 }}
{{- end }}
containers:
- name: client
image: {{ template "client.image" . }}
imagePullPolicy: {{ .Values.client.image.pullPolicy | quote }}
image: {{ .Values.client.image }}:{{ .Values.client.tag }}
imagePullPolicy: {{ .Values.client.pullPolicy | quote }}
args:
- --log-level={{ .Values.client.config.log.level }}
{{- if .Values.client.config.console }}
- --console
{{- if .Values.client.config.verbose }}
- --verbose
{{- end }}
resources:
{{ toYaml .Values.client.resources | indent 10 }}
{{ toYaml .Values.client.resources | indent 12 }}
env:
{{- if .Values.client.maxProcs }}
- name: GOMAXPROCS
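The `dfinit` and `restart-container-runtime` init containers above are driven entirely by `client.dfinit.*` values. A minimal values sketch that mounts and rewrites a containerd config; only keys referenced by the template above are used, and the concrete path and address are assumptions, not chart defaults.

```yaml
client:
  dfinit:
    enable: true
    restartContainerRuntime: true        # adds the privileged restart-container-runtime init container
    config:
      log:
        level: info
      proxy:
        addr: http://127.0.0.1:4001      # illustrative proxy address, not a documented default
      containerRuntime:
        containerd:
          configPath: /etc/containerd/config.toml   # assumed default containerd config path
```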
@@ -157,29 +95,25 @@ spec:
ports:
- containerPort: {{ .Values.client.config.upload.server.port }}
protocol: TCP
- containerPort: {{ .Values.client.config.health.server.port }}
protocol: TCP
- containerPort: {{ .Values.client.config.metrics.server.port }}
protocol: TCP
- containerPort: {{ .Values.client.config.stats.server.port }}
{{- if .Values.client.metrics.enable }}
- containerPort: {{ .Values.client.config.metrics.port }}
protocol: TCP
{{- end }}
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.client.config.download.server.socketPath }}"]
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.client.config.upload.server.port }}"]
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 5
periodSeconds: 10
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.client.config.download.server.socketPath }}"]
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.client.config.upload.server.port }}"]
initialDelaySeconds: 15
periodSeconds: 30
timeoutSeconds: 5
periodSeconds: 10
volumeMounts:
- name: config
mountPath: "/etc/dragonfly"
- name: socket-dir
mountPath: /var/run/dragonfly
- name: storage
mountPath: {{ .Values.client.config.storage.dir }}
{{- if .Values.client.extraVolumeMounts }}
{{- toYaml .Values.client.extraVolumeMounts | nindent 8 }}
{{- end }}

@@ -187,39 +121,10 @@ spec:
- name: config
configMap:
name: {{ template "dragonfly.client.fullname" . }}
- name: socket-dir
- name: storage
hostPath:
path: /var/run/dragonfly
path: {{ .Values.client.config.storage.dir }}
type: DirectoryOrCreate
{{- if .Values.client.dfinit.enable }}
- name: dfinit-config
configMap:
name: {{ template "dragonfly.dfinit.fullname" . }}
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.containerd) }}
- name: containerd-config-dir
hostPath:
path: {{ dir .Values.client.dfinit.config.containerRuntime.containerd.configPath }}
type: DirectoryOrCreate
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.crio) }}
- name: crio-config-dir
hostPath:
path: {{ dir .Values.client.dfinit.config.containerRuntime.crio.configPath }}
type: DirectoryOrCreate
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.podman) }}
- name: podman-config-dir
hostPath:
path: {{ dir .Values.client.dfinit.config.containerRuntime.podman.configPath }}
type: DirectoryOrCreate
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.docker) }}
- name: docker-config-dir
hostPath:
path: {{ dir .Values.client.dfinit.config.containerRuntime.docker.configPath }}
type: DirectoryOrCreate
{{- end }}
{{- if .Values.client.extraVolumes }}
{{- toYaml .Values.client.extraVolumes | nindent 6 }}
{{- end }}
@@ -1,18 +0,0 @@
{{- if .Values.client.dfinit.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.dfinit.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}
data:
dfinit.yaml: |-
proxy:
addr: {{ .Values.client.dfinit.config.proxy.addr }}
containerRuntime:
{{ toYaml .Values.client.dfinit.config.containerRuntime | indent 6 }}
{{- end }}
@@ -1,30 +0,0 @@
{{- if .Values.client.metrics.enable }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.client.fullname" . }}-metrics
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}-metrics
{{- if .Values.client.metrics.service.labels }}
{{ toYaml .Values.client.metrics.service.labels | indent 4 }}
{{- end }}
{{- if .Values.client.metrics.service.annotations }}
annotations:
{{ toYaml .Values.client.metrics.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.client.metrics.service.type }}
ports:
- port: {{ .Values.client.config.metrics.server.port }}
name: http-metrics
appProtocol: http
protocol: TCP
targetPort: {{ .Values.client.config.metrics.server.port }}
selector:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.client.name }}
{{- end }}
@@ -0,0 +1,33 @@
{{- if and .Values.client.metrics.enable .Values.client.metrics.podMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ template "dragonfly.client.fullname" . }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}
{{- if .Values.client.metrics.podMonitor.additionalLabels }}
{{ toYaml .Values.client.metrics.podMonitor.additionalLabels | indent 4 }}
{{- end }}
spec:
podMetricsEndpoints:
- port: {{ .Values.client.config.metrics.port }}
path: /metrics
{{- if .Values.client.metrics.podMonitor.interval }}
interval: {{ .Values.client.metrics.podMonitor.interval }}
{{- end }}
{{- if .Values.client.metrics.podMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.client.metrics.podMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.client.name }}
{{- end }}
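Everything in the added PodMonitor above is gated on two value flags plus a couple of optional scrape settings. A minimal values sketch to enable it; the interval, timeout, and label values are illustrative, only the keys come from the template.

```yaml
client:
  metrics:
    enable: true
    podMonitor:
      enable: true
      interval: 30s            # illustrative; omitted from the rendered PodMonitor if unset
      scrapeTimeout: 10s       # illustrative; omitted if unset
      additionalLabels:
        release: prometheus    # illustrative label for a Prometheus Operator selector
```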
@@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "dragonfly.client.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}
@@ -1,31 +0,0 @@
{{- if and .Values.client.metrics.enable .Values.client.metrics.serviceMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "dragonfly.client.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}
{{- if .Values.client.metrics.serviceMonitor.additionalLabels }}
{{ toYaml .Values.client.metrics.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
spec:
endpoints:
- port: http-metrics
{{- if .Values.client.metrics.serviceMonitor.interval }}
interval: {{ .Values.client.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.client.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.client.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "dragonfly.name" . }}
component: {{ .Values.client.name }}-metrics
{{- end }}
@@ -0,0 +1,107 @@
{{- if .Values.dfdaemon.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.dfdaemon.fullname" . }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.dfdaemon.name }}
data:
dfget.yaml: |-
aliveTime: {{ .Values.dfdaemon.config.aliveTime }}
gcInterval: {{ .Values.dfdaemon.config.gcInterval }}
keepStorage: {{ .Values.dfdaemon.config.keepStorage }}
workHome: {{ .Values.dfdaemon.config.workHome }}
logDir: {{ .Values.dfdaemon.config.logDir }}
cacheDir: {{ .Values.dfdaemon.config.cacheDir }}
pluginDir: {{ .Values.dfdaemon.config.pluginDir }}
dataDir: {{ .Values.dfdaemon.config.dataDir }}
console: {{ .Values.dfdaemon.config.console }}
health:
{{ toYaml .Values.dfdaemon.config.health | indent 6 }}
verbose: {{ .Values.dfdaemon.config.verbose }}
{{- if .Values.dfdaemon.config.verbose }}
pprof-port: {{ .Values.dfdaemon.config.pprofPort }}
{{- end }}
{{- if .Values.dfdaemon.metrics.enable }}
metrics: ":8000"
{{- end }}
{{- if .Values.dfdaemon.config.jaeger }}
jaeger: {{ .Values.dfdaemon.config.jaeger }}
{{- else if .Values.jaeger.enable }}
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
{{- end }}
scheduler:
manager:
enable: {{ .Values.dfdaemon.config.scheduler.manager.enable }}
netAddrs:
{{- if and (.Values.dfdaemon.config.scheduler.manager.enable) (.Values.dfdaemon.config.scheduler.manager.netAddrs) }}
{{ toYaml .Values.dfdaemon.config.scheduler.manager.netAddrs | indent 10 }}
{{- else if .Values.manager.enable }}
- type: tcp
addr: {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
{{- else }}
- type: tcp
addr: {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
{{- end }}
refreshInterval: {{ .Values.dfdaemon.config.scheduler.manager.refreshInterval }}
netAddrs:
{{- if and (not .Values.dfdaemon.config.scheduler.manager.enable) (.Values.dfdaemon.config.scheduler.netAddrs) }}
{{ toYaml .Values.dfdaemon.config.scheduler.netAddrs | indent 8 }}
{{- end }}
scheduleTimeout: {{ .Values.dfdaemon.config.scheduler.scheduleTimeout }}
disableAutoBackSource: {{ .Values.dfdaemon.config.scheduler.disableAutoBackSource }}
seedPeer:
{{ toYaml .Values.dfdaemon.config.scheduler.manager.seedPeer | indent 8 }}
host:
{{ toYaml .Values.dfdaemon.config.host | indent 6 }}
download:
{{ toYaml .Values.dfdaemon.config.download | indent 6 }}
upload:
{{ toYaml .Values.dfdaemon.config.upload | indent 6 }}
objectStorage:
{{ toYaml .Values.dfdaemon.config.objectStorage | indent 6 }}
storage:
{{ toYaml .Values.dfdaemon.config.storage | indent 6 }}
proxy:
defaultFilter: {{ .Values.dfdaemon.config.proxy.defaultFilter }}
defaultTag: {{ .Values.dfdaemon.config.proxy.defaultTag }}
tcpListen:
{{- if not .Values.dfdaemon.hostNetwork }}
namespace: {{ .Values.dfdaemon.config.proxy.tcpListen.namespace }}
{{- end }}
port: {{ .Values.dfdaemon.containerPort }}
security:
{{ toYaml .Values.dfdaemon.config.proxy.security | indent 8 }}
registryMirror:
{{ toYaml .Values.dfdaemon.config.proxy.registryMirror | indent 8 }}
proxies:
{{ toYaml .Values.dfdaemon.config.proxy.proxies | indent 8 }}
{{- if .Values.containerRuntime.docker.enable }}
hijackHTTPS:
cert: /etc/dragonfly-ca/cacert.pem
key: /etc/dragonfly-ca/cakey.pem
hosts:
- regx: .*
insecure: {{ .Values.containerRuntime.docker.insecure }}
{{- if and .Values.containerRuntime.docker.injectHosts (not .Values.containerRuntime.docker.restart) }}
sni:
{{- range .Values.containerRuntime.docker.registryPorts }}
- listen: 127.0.0.1
port: {{ . }}
{{- if not $.Values.dfdaemon.hostNetwork }}
namespace: {{ $.Values.dfdaemon.config.proxy.tcpListen.namespace }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
security:
{{ toYaml .Values.dfdaemon.config.security | indent 6 }}
network:
{{ toYaml .Values.dfdaemon.config.network | indent 6 }}
announcer:
{{ toYaml .Values.dfdaemon.config.announcer | indent 6 }}
{{- end }}
@ -0,0 +1,605 @@
|
|||
{{- if .Values.dfdaemon.enable }}
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: {{ template "dragonfly.dfdaemon.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.dfdaemon.name }}
|
||||
annotations:
|
||||
{{- if .Values.dfdaemon.daemonsetAnnotations }}
|
||||
{{ toYaml .Values.dfdaemon.daemonsetAnnotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.dfdaemon.updateStrategy }}
|
||||
updateStrategy:
|
||||
{{ toYaml .Values.dfdaemon.updateStrategy | indent 4 }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: "{{ .Values.dfdaemon.name }}"
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: "{{ .Values.dfdaemon.name }}"
|
||||
release: {{ .Release.Name }}
|
||||
{{- if .Values.dfdaemon.podLabels }}
|
||||
{{ toYaml .Values.dfdaemon.podLabels | indent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/dfdaemon/dfdaemon-configmap.yaml") . | sha256sum }}
|
||||
{{- if .Values.dfdaemon.podAnnotations }}
|
||||
{{ toYaml .Values.dfdaemon.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.dfdaemon.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.dfdaemon.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
hostNetwork: {{ .Values.dfdaemon.hostNetwork }}
|
||||
{{- if .Values.dfdaemon.hostNetwork }}
|
||||
dnsPolicy: "ClusterFirstWithHostNet"
|
||||
{{- end }}
|
||||
hostPID: {{ or (and .Values.containerRuntime.docker.enable .Values.containerRuntime.docker.restart) .Values.containerRuntime.containerd.enable .Values.containerRuntime.crio.enable (gt (len .Values.containerRuntime.extraInitContainers) 0) }}
|
||||
{{- if .Values.dfdaemon.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.dfdaemon.tolerations | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.affinity }}
|
||||
affinity:
|
||||
{{ toYaml .Values.dfdaemon.affinity | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if quote .Values.dfdaemon.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.dfdaemon.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.dfdaemon.priorityClassName) }}
|
||||
priorityClassName: {{ .Values.dfdaemon.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.dfdaemon.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.hostAliases }}
|
||||
hostAliases:
|
||||
{{ toYaml .Values.dfdaemon.hostAliases | indent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: dfdaemon
|
||||
image: "{{ .Values.dfdaemon.image }}:{{ .Values.dfdaemon.tag }}"
|
||||
imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
env:
|
||||
{{- if .Values.dfdaemon.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
value: {{ .Values.dfdaemon.maxProcs }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.dfdaemon.containerPort }}
|
||||
{{- if and (not .Values.dfdaemon.hostNetwork) (empty .Values.dfdaemon.config.proxy.tcpListen.namespace) }}
|
||||
hostPort: {{ .Values.dfdaemon.hostPort }}
|
||||
hostIP: 127.0.0.1
|
||||
{{- end }}
|
||||
protocol: TCP
|
||||
- containerPort: {{ .Values.dfdaemon.config.health.tcpListen.port }}
|
||||
hostPort: {{ .Values.dfdaemon.config.health.tcpListen.port }}
|
||||
hostIP: 127.0.0.1
|
||||
protocol: TCP
|
||||
{{- if .Values.dfdaemon.config.objectStorage.enable }}
|
||||
- containerPort: {{ .Values.dfdaemon.config.objectStorage.tcpListen.port }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.metrics.enable }}
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.dfdaemon.config.download.peerGRPC.tcpListen.port }}"]
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.dfdaemon.config.download.peerGRPC.tcpListen.port }}"]
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 10
|
||||
{{- if and .Values.containerRuntime.docker.enable (not .Values.containerRuntime.docker.restart) }}
|
||||
{{- if .Values.containerRuntime.docker.injectHosts }}
|
||||
lifecycle:
|
||||
postStart:
|
||||
exec:
|
||||
command:
|
||||
- "/bin/sh"
|
||||
- "-c"
|
||||
- |
|
||||
# inject hosts after dfdaemon started
|
||||
domains="{{- join " " .Values.containerRuntime.docker.registryDomains }}"
|
||||
# remove static dns in pod /etc/hosts, which injected by host network
|
||||
echo "$(sed '/# Dragonfly SNI Host/d' /etc/hosts)" > /etc/hosts
|
||||
|
||||
if [[ -n "$domains" ]]; then
|
||||
for domain in $domains; do
|
||||
# inject static dns into /host/etc/hosts
|
||||
if grep "127.0.0.1 $domain" /host/etc/hosts; then
|
||||
echo "Dragonfly SNI Host $domain Found in /host/etc/hosts"
|
||||
continue
|
||||
else
|
||||
echo "Try to add dragonfly SNI host $domain"
|
||||
echo "127.0.0.1 $domain # Dragonfly SNI Host $domain" >> /host/etc/hosts
|
||||
echo "Dragonfly SNI host $domain added"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- "/bin/sh"
|
||||
- "-c"
|
||||
- |
|
||||
# when stop dfdaemon, clean up injected hosts info in /etc/hosts for current node
|
||||
echo "$(sed '/# Dragonfly SNI Host/d' /host/etc/hosts)" > /host/etc/hosts
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: "/etc/dragonfly"
|
||||
{{- if and .Values.containerRuntime.docker.enable (not .Values.containerRuntime.docker.restart) }}
|
||||
{{- if and .Values.containerRuntime.docker.injectHosts }}
|
||||
- name: etc
|
||||
mountPath: /host/etc
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.containerRuntime.docker.enable }}
|
||||
- name: d7y-ca
|
||||
mountPath: /etc/dragonfly-ca
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.extraVolumeMounts }}
|
||||
{{- toYaml .Values.dfdaemon.extraVolumeMounts | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace }}
|
||||
- name: run
|
||||
mountPath: /run/dragonfly
|
||||
- name: data
|
||||
mountPath: {{ .Values.dfdaemon.config.dataDir }}
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- SYS_ADMIN
|
||||
{{- end }}
|
||||
{{- if or (and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace) .Values.containerRuntime.containerd.enable .Values.containerRuntime.docker.enable .Values.containerRuntime.extraInitContainers }}
|
||||
initContainers:
|
||||
{{- if .Values.scheduler.enable }}
|
||||
- name: wait-for-scheduler
|
||||
image: {{ .Values.dfdaemon.initContainer.image }}:{{ .Values.dfdaemon.initContainer.tag }}
|
||||
imagePullPolicy: {{ .Values.dfdaemon.initContainer.pullPolicy }}
|
||||
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.scheduler.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.scheduler.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.scheduler.config.server.port }}; do echo waiting for scheduler; sleep 2; done;']
|
||||
{{- end }}
|
||||
{{- if and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace }}
|
||||
- name: mount-netns
|
||||
image: "{{ .Values.dfdaemon.image }}:{{ .Values.dfdaemon.tag }}"
|
||||
imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
# FIXME dfget daemon only need /proc/1/ns/net and CAP_SYS_ADMIN,
|
||||
# but containerd resolves the symbolic of /proc/1/ns/net from v1.5.0.
|
||||
# due to /proc/1/ns/net is not a regular symbolic link, it always failed.
|
||||
# for keeping only CAP_SYS_ADMIN capability, use init container to bind mount only netns to /run/dragonfly/net.
|
||||
# https://github.com/containerd/containerd/blob/v1.5.0/pkg/cri/opts/spec_linux.go#L171.
|
||||
command:
|
||||
- /bin/sh
|
||||
- -cx
|
||||
- |-
|
||||
if [ ! -e "/run/dragonfly/net" ]; then
|
||||
touch /run/dragonfly/net
|
||||
fi
|
||||
i1=$(stat -L -c %i /host/ns/net)
|
||||
i2=$(stat -L -c %i /run/dragonfly/net)
|
||||
if [ "$i1" != "$i2" ]; then
|
||||
/bin/mount -o bind /host/ns/net /run/dragonfly/net
|
||||
fi
|
||||
volumeMounts:
|
||||
- name: hostns
|
||||
mountPath: /host/ns
|
||||
- name: run
|
||||
mountPath: /run/dragonfly
|
||||
# bind mount need Bidirectional to propagate into host.
|
||||
mountPropagation: Bidirectional
|
||||
securityContext:
|
||||
# open /proc/1/ns need privilege permission.
|
||||
privileged: true
|
||||
{{- end }}
|
||||
{{- if .Values.containerRuntime.extraInitContainers }}
|
||||
{{ toYaml .Values.containerRuntime.extraInitContainers | indent 6 }}
|
||||
{{- else if .Values.containerRuntime.docker.enable }}
|
||||
- name: update-docker-config
|
||||
image: "{{ .Values.containerRuntime.initContainerImage }}"
|
||||
imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -cx
|
||||
- |-
|
||||
mkdir -p /tmp/dragonfly-ca
|
||||
cd /tmp/dragonfly-ca
|
||||
|
||||
openssl genrsa -out cakey.pem 2048
|
||||
|
||||
cat << EOF > root.conf
|
||||
[ req ]
|
||||
default_bits = 2048
|
||||
default_keyfile = key.pem
|
||||
default_md = sha256
|
||||
distinguished_name = req_distinguished_name
|
||||
req_extensions = req_ext
|
||||
string_mask = nombstr
|
||||
x509_extensions = x509_ext
|
||||
[ req_distinguished_name ]
|
||||
countryName = Country Name (2 letter code)
|
||||
countryName_default = {{.Values.containerRuntime.docker.caCert.countryName}}
|
||||
stateOrProvinceName = State or Province Name (full name)
|
||||
stateOrProvinceName_default = {{.Values.containerRuntime.docker.caCert.stateOrProvinceName}}
|
||||
localityName = Locality Name (eg, city)
|
||||
localityName_default = {{.Values.containerRuntime.docker.caCert.localityName}}
|
||||
organizationName = Organization Name (eg, company)
|
||||
organizationName_default = {{.Values.containerRuntime.docker.caCert.organizationName}}
|
||||
commonName = Common Name (e.g. server FQDN or YOUR name)
|
||||
commonName_max = 64
|
||||
commonName_default = {{.Values.containerRuntime.docker.caCert.commonName}}
|
||||
[ x509_ext ]
|
||||
authorityKeyIdentifier = keyid,issuer
|
||||
basicConstraints = CA:TRUE
|
||||
keyUsage = digitalSignature, keyEncipherment, keyCertSign, cRLSign
|
||||
subjectKeyIdentifier = hash
|
||||
[ req_ext ]
|
||||
basicConstraints = CA:TRUE
|
||||
keyUsage = digitalSignature, keyEncipherment, keyCertSign, cRLSign
|
||||
subjectKeyIdentifier = hash
|
||||
EOF
|
||||
|
||||
openssl req -batch -new -x509 -key ./cakey.pem -out ./cacert.pem -days 65536 -config ./root.conf
|
||||
openssl x509 -inform PEM -in ./cacert.pem -outform DER -out ./CA.cer
|
||||
|
||||
openssl x509 -in ./cacert.pem -noout -text
|
||||
# update ca for golang program(docker in host), refer: https://github.com/golang/go/blob/go1.17/src/crypto/x509/root_linux.go#L8
|
||||
ca_list="/etc/ssl/certs/ca-certificates.crt /etc/pki/tls/certs/ca-bundle.crt /etc/ssl/ca-bundle.pem /etc/pki/tls/cacert.pem /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /etc/ssl/cert.pem"
|
||||
for ca in $ca_list; do
|
||||
ca="/host$ca"
|
||||
if [[ -e "$ca" ]]; then
|
||||
echo "CA $ca" found
|
||||
if grep "Dragonfly Authority CA" "$ca"; then
|
||||
echo "Dragonfly Authority ca found"
|
||||
if [[ -e /host/etc/dragonfly-ca/cakey.pem && -e /host/etc/dragonfly-ca/cacert.pem ]]; then
|
||||
echo "CA cert and key ready"
|
||||
break
|
||||
else
|
||||
echo "Warning: CA cert and key not ready"
|
||||
fi
|
||||
fi
|
||||
echo "Try to add Dragonfly CA"
|
||||
echo "# Dragonfly Authority CA" > cacert.toadd.pem
|
||||
cat cacert.pem >> cacert.toadd.pem
|
||||
cat cacert.toadd.pem >> "$ca"
|
||||
echo "Dragonfly CA added"
|
||||
cp -f ./cakey.pem ./cacert.pem /host/etc/dragonfly-ca/
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
{{- if not .Values.containerRuntime.docker.restart }}
|
||||
domains="{{- join " " .Values.containerRuntime.docker.registryDomains }}"
|
||||
if [[ -n "$domains" ]]; then
|
||||
for domain in $domains; do
|
||||
# inject docker cert by registry domain
|
||||
dir=/host/etc/docker/certs.d/$domain
|
||||
mkdir -p "$dir"
|
||||
echo copy CA cert to $dir
|
||||
cp -f /host/etc/dragonfly-ca/cacert.pem "$dir/ca.crt"
|
||||
done
|
||||
fi
|
||||
{{- end }}
|
||||
{{- if .Values.containerRuntime.docker.restart }}
|
||||
# inject docker proxy setting and restart docker
|
||||
# currently, without host pid in container, we can not nsenter with pid and can not invoke systemctl correctly.
|
||||
status=$(nsenter -t 1 -m -- systemctl status docker --no-pager | grep http-proxy.conf)
|
||||
if [[ -n "$status" ]]; then
|
||||
echo Docker proxy already enabled, skip
|
||||
else
|
||||
echo Try to inject proxy and restart docker
|
||||
path=$(nsenter -t 1 -m -- systemctl show -p FragmentPath docker.service | grep -o "/.*systemd.*")
|
||||
if [[ -z "$path" ]]; then
|
||||
echo docker.service not found
|
||||
exit 1
|
||||
fi
|
||||
nsenter -t 1 -m -- mkdir -p "$path".d
|
||||
nsenter -t 1 -m -- sh -c "echo '[Service]' > $path.d/http-proxy.conf"
|
||||
nsenter -t 1 -m -- sh -c "echo 'Environment=\"HTTP_PROXY=http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}\"' >> $path.d/http-proxy.conf"
|
||||
nsenter -t 1 -m -- sh -c "echo 'Environment=\"HTTPS_PROXY=http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}\"' >> $path.d/http-proxy.conf"
|
||||
nsenter -t 1 -m -- sh -c "echo 'Environment=\"NO_PROXY={{ join "," .Values.containerRuntime.docker.skipHosts }}\"' >> $path.d/http-proxy.conf"
|
||||
nsenter -t 1 -m -- systemctl daemon-reload
|
||||
nsenter -t 1 -m -- systemctl restart docker.service
|
||||
fi
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: etc
|
||||
mountPath: /host/etc
|
||||
{{- if .Values.containerRuntime.docker.restart }}
|
||||
securityContext:
|
||||
# nsenter need privilege permission.
|
||||
privileged: true
|
||||
{{- end }}
|
||||
{{- else if .Values.containerRuntime.containerd.enable }}
|
||||
- name: update-containerd
|
||||
image: "{{ .Values.containerRuntime.initContainerImage }}"
|
||||
imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -cx
|
||||
- |-
|
||||
etcContainerd=/host{{ .Values.containerRuntime.containerd.configPathDir }}
|
||||
if [[ -e $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }} ]]; then
|
||||
echo containerd config found
|
||||
else
|
||||
echo $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }} not found
|
||||
exit 1
|
||||
fi
|
||||
cat $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
|
||||
registries="{{- join " " .Values.containerRuntime.containerd.registries }}"
|
||||
if [[ -n "$domains" ]]; then
|
||||
echo empty registry domains
|
||||
exit 1
|
||||
fi
|
||||
# detect containerd config version
|
||||
need_restart=0
|
||||
if grep "version[^=]*=[^2]*2" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
# inject v2 mirror setting
|
||||
|
||||
# get config_path if set
|
||||
{{- if .Values.containerRuntime.containerd.injectConfigPath }}
|
||||
config_path=$etcContainerd/certs.d
|
||||
{{- else }}
|
||||
config_path=$(cat $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }} | tr '"' ' ' | grep config_path | awk '{print $3}')
|
||||
{{- end }}
|
||||
if [[ -z "$config_path" ]]; then
|
||||
echo config_path is not enabled, just add one mirror in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
# parse registry domain
|
||||
registry={{ .Values.dfdaemon.config.proxy.registryMirror.url}}
|
||||
domain=$(echo $registry | sed -e "s,http.*://,," | sed "s,:.*,,")
|
||||
# inject registry
|
||||
if grep "registry.mirrors.\"$domain\"" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
# TODO merge mirrors
|
||||
echo "registry $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."$domain"]
|
||||
endpoint = ["http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}","$registry"]
|
||||
EOF
|
||||
echo "Registry $domain added"
|
||||
need_restart=1
|
||||
fi
|
||||
{{- if .Values.containerRuntime.containerd.injectRegistryCredencials.enable}}
|
||||
if grep "registry.configs.\"$domain\".auth" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
echo "registry auth $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.configs."$domain".auth]
|
||||
username = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.username }}"
|
||||
password = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.password }}"
|
||||
auth = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.auth }}"
|
||||
identitytoken = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.identitytoken }}"
|
||||
EOF
|
||||
echo "Registry auth $domain added"
|
||||
need_restart=1
|
||||
fi
|
||||
{{- end }}
|
||||
else
|
||||
echo config_path is enabled, add mirror in $config_path
|
||||
# TODO check whether config_path is enabled, if not, add it
|
||||
tmp=$(cat $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }} | tr '"' ' ' | grep config_path | awk '{print $3}')
|
||||
if [[ -z "$tmp" ]]; then
|
||||
echo inject config_path into $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
if grep -q '\[plugins."io.containerd.grpc.v1.cri".registry\]' $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
sed -i 's|\[plugins."io.containerd.grpc.v1.cri".registry\]|\[plugins."io.containerd.grpc.v1.cri".registry\]\nconfig_path = "{{ .Values.containerRuntime.containerd.configPathDir }}/certs.d"|g' $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||
config_path = "{{ .Values.containerRuntime.containerd.configPathDir }}/certs.d"
|
||||
EOF
|
||||
fi
|
||||
echo "Registry config_path $config_path added"
|
||||
need_restart=1
|
||||
fi
|
||||
mkdir -p $etcContainerd/certs.d
|
||||
for registry in $registries; do
|
||||
# If the registry is docker.io, then the domain name should
|
||||
# be changed to index.docker.io.
|
||||
if [ $registry == "https://docker.io" ]; then
|
||||
registry_domain=https://index.docker.io
|
||||
elif [ $registry == "http://docker.io" ]; then
|
||||
registry_domain=http://index.docker.io
|
||||
else
|
||||
registry_domain=$registry
|
||||
fi
|
||||
# parse registry domain
|
||||
domain=$(echo $registry | sed -e "s,http.*://,,")
|
||||
# inject registry
|
||||
mkdir -p $etcContainerd/certs.d/$domain
|
||||
if [[ -e "$etcContainerd/certs.d/$domain/hosts.toml" ]]; then
|
||||
echo "registry $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
continue
|
||||
else
|
||||
cat << EOF >> $etcContainerd/certs.d/$domain/hosts.toml
|
||||
server = "$registry_domain"
|
||||
[host."http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}"]
|
||||
capabilities = ["pull", "resolve"]
|
||||
[host."http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}".header]
|
||||
X-Dragonfly-Registry = ["$registry_domain"]
|
||||
[host."$registry_domain"]
|
||||
capabilities = ["pull", "resolve"]
|
||||
EOF
|
||||
echo "Registry $domain added"
|
||||
{{- if not .Values.containerRuntime.containerd.injectConfigPath }}
|
||||
need_restart=1
|
||||
{{- end }}
|
||||
fi
|
||||
done
|
||||
fi
|
||||
else
|
||||
# inject legacy v1 mirror setting
|
||||
echo containerd config is version 1, just only support one mirror in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
# parse registry domain
|
||||
registry={{ .Values.dfdaemon.config.proxy.registryMirror.url}}
|
||||
domain=$(echo {{ .Values.dfdaemon.config.proxy.registryMirror.url}} | sed -e "s,http.*://,," | sed "s,:.*,,")
|
||||
# inject registry
|
||||
if grep "registry.mirrors.\"$domain\"" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
# TODO merge mirrors
|
||||
echo "registry $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins.cri.registry.mirrors."$domain"]
|
||||
endpoint = ["http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}","$registry"]
|
||||
EOF
|
||||
echo "Registry $domain added"
|
||||
need_restart=1
|
||||
fi
|
||||
{{- if .Values.containerRuntime.containerd.injectRegistryCredencials.enable}}
|
||||
if grep "registry.configs.\"$domain\".auth" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
echo "registry auth $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.configs."$domain".auth]
|
||||
username = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.username }}"
|
||||
password = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.password }}"
|
||||
auth = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.auth }}"
|
||||
EOF
|
||||
echo "Registry auth $domain added"
|
||||
need_restart=1
|
||||
fi
|
||||
{{- end }}
|
||||
fi
|
||||
# restart containerd
|
||||
# currently, without host pid in container, we can not nsenter with pid and can not invoke systemctl correctly.
|
||||
if [[ "$need_restart" -gt 0 ]]; then
|
||||
nsenter -t 1 -m -- systemctl restart containerd.service
|
||||
fi
|
||||
volumeMounts:
|
||||
- name: containerd-conf
|
||||
mountPath: /host{{ .Values.containerRuntime.containerd.configPathDir }}
|
||||
securityContext:
|
||||
# nsenter need privilege permission.
|
||||
privileged: true
|
||||
{{- else if .Values.containerRuntime.crio.enable }}
|
||||
- name: update-crio
|
||||
image: "{{ .Values.containerRuntime.initContainerImage }}"
|
||||
imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -cx
|
||||
- |-
|
||||
registries="{{- join " " .Values.containerRuntime.crio.registries }}"
|
||||
if [[ -n "$domains" ]]; then
|
||||
echo Empty registry domains
|
||||
exit 1
|
||||
fi
|
||||
|
||||
confd="/host/etc/containers/registries.conf.d"
|
||||
if [[ ! -e "$confd" ]]; then
|
||||
mkdir -p "$confd"
|
||||
fi
|
||||
|
||||
for registry in $registries; do
|
||||
# parse registry domain
|
||||
domain=$(echo $registry | sed "s,http.://,," | sed "s,:.*,,")
|
||||
schema=$(echo $registry | sed "s,://.*,,")
|
||||
port=$(echo $registry | sed "s,http.://,," | sed "s,[^:]*,," | sed "s,:,,")
|
||||
insecure=false
|
||||
if [[ "$schema" == "http" ]]; then
|
||||
insecure=true
|
||||
fi
|
||||
if [[ -z "$port" ]]; then
|
||||
if [[ "$schema" == "https" ]]; then
|
||||
port=443
|
||||
elif [[ "$schema" == "http" ]]; then
|
||||
port=80
|
||||
fi
|
||||
fi
|
||||
echo schema: $schema, domain: $domain, port: $port
|
||||
# inject registry
|
||||
if [[ -e "$confd/$domain.conf" ]]; then
|
||||
echo "registry $registry found in $confd, skip"
|
||||
continue
|
||||
else
|
||||
cat << EOF > "$confd/$domain.conf"
|
||||
[[registry]]
|
||||
prefix = "$domain"
|
||||
location = "$domain:$port"
|
||||
insecure = $insecure
|
||||
[[registry.mirror]]
|
||||
location = "127.0.0.1:{{ .Values.dfdaemon.hostPort}}"
|
||||
insecure = true
|
||||
EOF
|
||||
echo "Registry $domain added"
|
||||
fi
|
||||
done
|
||||
nsenter -t 1 -m -- systemctl reload crio.service
|
||||
volumeMounts:
|
||||
- name: etc
|
||||
mountPath: /host/etc
|
||||
securityContext:
|
||||
# nsenter need privilege permission.
|
||||
privileged: true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ template "dragonfly.dfdaemon.fullname" . }}
|
||||
{{- if and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace }}
|
||||
- name: hostns
|
||||
hostPath:
|
||||
path: /proc/1/ns
|
||||
- name: run
|
||||
hostPath:
|
||||
path: /run/dragonfly
|
||||
type: DirectoryOrCreate
|
||||
{{- end }}
|
||||
{{- if .Values.containerRuntime.docker.enable }}
|
||||
- name: etc
|
||||
hostPath:
|
||||
path: /etc
|
||||
- name: d7y-ca
|
||||
hostPath:
|
||||
path: /etc/dragonfly-ca
|
||||
type: DirectoryOrCreate
|
||||
{{- else if .Values.containerRuntime.containerd.enable }}
|
||||
- name: containerd-conf
|
||||
hostPath:
|
||||
path: {{ .Values.containerRuntime.containerd.configPathDir }}
|
||||
{{- else if .Values.containerRuntime.crio.enable }}
|
||||
- name: etc
|
||||
hostPath:
|
||||
path: /etc
|
||||
{{- end }}
|
||||
- name: data
|
||||
{{- if .Values.dfdaemon.mountDataDirAsHostPath }}
|
||||
hostPath:
|
||||
path: {{ .Values.dfdaemon.config.dataDir }}
|
||||
type: DirectoryOrCreate
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.extraVolumes }}
|
||||
{{- toYaml .Values.dfdaemon.extraVolumes | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,33 @@
|
|||
{{- if and .Values.dfdaemon.metrics.enable .Values.dfdaemon.metrics.podMonitor.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PodMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.dfdaemon.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.dfdaemon.name }}
|
||||
{{- if .Values.dfdaemon.metrics.podMonitor.additionalLabels }}
|
||||
{{ toYaml .Values.dfdaemon.metrics.podMonitor.additionalLabels | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
podMetricsEndpoints:
|
||||
- port: 8000
|
||||
path: /metrics
|
||||
{{- if .Values.dfdaemon.metrics.podMonitor.interval }}
|
||||
interval: {{ .Values.dfdaemon.metrics.podMonitor.interval }}
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.metrics.podMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ .Values.dfdaemon.metrics.podMonitor.scrapeTimeout }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.dfdaemon.name }}
|
||||
{{- end }}
|
|
@ -0,0 +1,20 @@
|
|||
{{- if and .Values.dfdaemon.metrics.enable .Values.dfdaemon.metrics.prometheusRule.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.dfdaemon.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.dfdaemon.name }}
|
||||
{{- if .Values.dfdaemon.metrics.prometheusRule.additionalLabels }}
|
||||
{{ toYaml .Values.dfdaemon.metrics.prometheusRule.additionalLabels | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
groups:
|
||||
- name: {{ template "dragonfly.dfdaemon.fullname" $ }}
|
||||
rules:
|
||||
{{ toYaml .Values.dfdaemon.metrics.prometheusRule.rules | indent 8 }}
|
||||
{{- end }}
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
@ -25,7 +25,6 @@ data:
|
|||
start: {{ .Values.manager.grpcPort }}
|
||||
end: {{ .Values.manager.grpcPort }}
|
||||
workHome: {{ .Values.manager.config.server.workHome }}
|
||||
logLevel: {{ .Values.manager.config.server.logLevel }}
|
||||
logDir: {{ .Values.manager.config.server.logDir }}
|
||||
cacheDir: {{ .Values.manager.config.server.cacheDir }}
|
||||
pluginDir: {{ .Values.manager.config.server.pluginDir }}
|
||||
|
@ -59,8 +58,6 @@ data:
|
|||
masterName: {{ .Values.externalRedis.masterName }}
|
||||
username: {{ .Values.externalRedis.username }}
|
||||
password: {{ .Values.externalRedis.password }}
|
||||
sentinelUsername: {{ .Values.externalRedis.sentinelUsername }}
|
||||
sentinelPassword: {{ .Values.externalRedis.sentinelPassword }}
|
||||
db: {{ .Values.externalRedis.db }}
|
||||
brokerDB: {{ .Values.externalRedis.brokerDB }}
|
||||
backendDB: {{ .Values.externalRedis.backendDB }}
|
||||
|
@ -69,13 +66,23 @@ data:
|
|||
{{ toYaml .Values.manager.config.cache | indent 6 }}
|
||||
job:
|
||||
{{ toYaml .Values.manager.config.job | indent 6 }}
|
||||
objectStorage:
|
||||
{{ toYaml .Values.manager.config.objectStorage | indent 6 }}
|
||||
security:
|
||||
{{ toYaml .Values.manager.config.security | indent 6 }}
|
||||
network:
|
||||
{{ toYaml .Values.manager.config.network | indent 6 }}
|
||||
metrics:
|
||||
enable: {{ .Values.manager.metrics.enable }}
|
||||
addr: ":8000"
|
||||
console: {{ .Values.manager.config.console }}
|
||||
pprofPort: {{ .Values.manager.config.pprofPort }}
|
||||
tracing:
|
||||
{{ toYaml .Values.manager.config.tracing | indent 6 }}
|
||||
verbose: {{ .Values.manager.config.verbose }}
|
||||
{{- if .Values.manager.config.verbose }}
|
||||
pprof-port: {{ .Values.manager.config.pprofPort }}
|
||||
{{- end }}
|
||||
{{- if .Values.manager.config.jaeger }}
|
||||
jaeger: {{ .Values.manager.config.jaeger }}
|
||||
{{- else if .Values.jaeger.enable }}
|
||||
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -4,11 +4,11 @@ kind: Deployment
|
|||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
annotations:
|
||||
{{- if .Values.manager.deploymentAnnotations }}
|
||||
{{ toYaml .Values.manager.deploymentAnnotations | indent 4 }}
|
||||
|
@ -22,11 +22,13 @@ spec:
|
|||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.manager.name }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.manager.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- if .Values.manager.podLabels }}
|
||||
{{ toYaml .Values.manager.podLabels | indent 8 }}
|
||||
{{- end }}
|
||||
|
@ -36,13 +38,9 @@ spec:
|
|||
{{ toYaml .Values.manager.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
hostNetwork: {{ .Values.manager.hostNetwork }}
|
||||
{{- if .Values.manager.hostNetwork }}
|
||||
dnsPolicy: "ClusterFirstWithHostNet"
|
||||
{{- end }}
|
||||
{{- with .Values.manager.nodeSelector | default .Values.global.nodeSelector }}
|
||||
{{- if .Values.manager.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{ toYaml .Values.manager.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.manager.tolerations }}
|
||||
tolerations:
|
||||
|
@ -55,10 +53,10 @@ spec:
|
|||
{{- if quote .Values.manager.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.manager.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if .Values.scheduler.priorityClassName }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.scheduler.priorityClassName) }}
|
||||
priorityClassName: {{ .Values.manager.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.manager.image.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
{{- with .Values.manager.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
|
@ -70,27 +68,23 @@ spec:
|
|||
initContainers:
|
||||
{{- if .Values.redis.enable }}
|
||||
- name: wait-for-redis
|
||||
image: {{ template "manager.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.manager.initContainer.image.pullPolicy }}
|
||||
image: {{ .Values.manager.initContainer.image }}:{{ .Values.manager.initContainer.tag }}
|
||||
imagePullPolicy: {{ .Values.manager.initContainer.pullPolicy }}
|
||||
command: ['sh', '-c', 'until nslookup {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} && nc -vz {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} {{ .Values.redis.master.service.ports.redis }}; do echo waiting for redis; sleep 2; done;']
|
||||
resources:
|
||||
{{ toYaml .Values.manager.initContainer.resources | indent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.mysql.enable }}
|
||||
- name: wait-for-mysql
|
||||
image: {{ template "manager.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.manager.initContainer.image.pullPolicy }}
|
||||
image: {{ .Values.manager.initContainer.image }}:{{ .Values.manager.initContainer.tag }}
|
||||
imagePullPolicy: {{ .Values.manager.initContainer.pullPolicy }}
|
||||
command: ['sh', '-c', 'until nslookup {{ .Release.Name }}-{{ default "mysql" .Values.mysql.fullname }}.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} && nc -vz {{ .Release.Name }}-{{ default "mysql" .Values.mysql.fullname }}.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} {{ .Values.mysql.primary.service.port }}; do echo waiting for mysql; sleep 2; done;']
|
||||
resources:
|
||||
{{ toYaml .Values.manager.initContainer.resources | indent 10 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: manager
|
||||
image: {{ template "manager.image" . }}
|
||||
imagePullPolicy: {{ .Values.manager.image.pullPolicy | quote }}
|
||||
image: "{{ .Values.manager.image }}:{{ .Values.manager.tag }}"
|
||||
imagePullPolicy: {{ .Values.manager.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.manager.resources | indent 10 }}
|
||||
{{ toYaml .Values.manager.resources | indent 12 }}
|
||||
env:
|
||||
{{- if .Values.manager.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
|
@ -121,6 +115,21 @@ spec:
|
|||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.manager.grpcPort }}"]
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 10
|
||||
{{- if .Values.manager.sidecar }}
|
||||
- name: {{ .Values.manager.sidecar.name }}
|
||||
image: {{ .Values.manager.sidecar.image }}:{{ .Values.manager.sidecar.tag }}
|
||||
args:
|
||||
{{- range .Values.manager.sidecar.args }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
command:
|
||||
{{- range .Values.manager.sidecar.command }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.manager.sidecar.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.manager.sidecar.resources | indent 12 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
|
|
|
@ -9,9 +9,9 @@ apiVersion: extensions/v1beta1
|
|||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
@ -21,18 +21,14 @@ spec:
|
|||
ports:
|
||||
- port: {{ .Values.manager.restPort }}
|
||||
name: http-rest
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.manager.restPort }}
|
||||
- port: {{ .Values.manager.grpcPort }}
|
||||
name: grpc
|
||||
appProtocol: grpc
|
||||
name: http-grpc
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.manager.grpcPort }}
|
||||
{{- if eq .Values.manager.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.manager.service.nodePort }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.manager.name }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,14 +3,14 @@ apiVersion: v1
|
|||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}-metrics
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}-metrics
|
||||
{{- if .Values.manager.metrics.service.labels }}
|
||||
{{ toYaml .Values.manager.metrics.service.labels | indent 4 }}
|
||||
{{ toYaml .Values.metrics.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.manager.metrics.service.annotations }}
|
||||
annotations:
|
||||
|
@ -21,10 +21,10 @@ spec:
|
|||
ports:
|
||||
- port: 8000
|
||||
name: http-metrics
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: 8000
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.manager.name }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
@ -27,5 +27,6 @@ spec:
|
|||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.manager.name }}-metrics
|
||||
{{- end }}
|
||||
|
|
|
@ -3,14 +3,14 @@ apiVersion: v1
|
|||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}-metrics
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}-metrics
|
||||
{{- if .Values.scheduler.metrics.service.labels }}
|
||||
{{ toYaml .Values.scheduler.metrics.service.labels | indent 4 }}
|
||||
{{ toYaml .Values.metrics.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.scheduler.metrics.service.annotations }}
|
||||
annotations:
|
||||
|
@ -21,10 +21,10 @@ spec:
|
|||
ports:
|
||||
- port: 8000
|
||||
name: http-metrics
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: 8000
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
@ -27,11 +27,11 @@ data:
|
|||
masterName: {{ .Values.externalRedis.masterName }}
|
||||
username: {{ .Values.externalRedis.username }}
|
||||
password: {{ .Values.externalRedis.password }}
|
||||
sentinelUsername: {{ .Values.externalRedis.sentinelUsername }}
|
||||
sentinelPassword: {{ .Values.externalRedis.sentinelPassword }}
|
||||
brokerDB: {{ .Values.externalRedis.brokerDB }}
|
||||
backendDB: {{ .Values.externalRedis.backendDB }}
|
||||
{{- end }}
|
||||
resource:
|
||||
{{ toYaml .Values.scheduler.config.resource | indent 6 }}
|
||||
dynconfig:
|
||||
{{ toYaml .Values.scheduler.config.dynconfig | indent 6 }}
|
||||
host:
|
||||
|
@ -59,13 +59,13 @@ data:
|
|||
masterName: {{ .Values.externalRedis.masterName }}
|
||||
username: {{ .Values.externalRedis.username }}
|
||||
password: {{ .Values.externalRedis.password }}
|
||||
sentinelUsername: {{ .Values.externalRedis.sentinelUsername }}
|
||||
sentinelPassword: {{ .Values.externalRedis.sentinelPassword }}
|
||||
brokerDB: {{ .Values.externalRedis.brokerDB }}
|
||||
backendDB: {{ .Values.externalRedis.backendDB }}
|
||||
{{- end }}
|
||||
storage:
|
||||
{{ toYaml .Values.scheduler.config.storage | indent 6 }}
|
||||
security:
|
||||
{{ toYaml .Values.scheduler.config.security | indent 6 }}
|
||||
network:
|
||||
{{ toYaml .Values.scheduler.config.network | indent 6 }}
|
||||
metrics:
|
||||
|
@ -73,7 +73,13 @@ data:
|
|||
addr: ":8000"
|
||||
enableHost: {{ .Values.scheduler.metrics.enableHost }}
|
||||
console: {{ .Values.scheduler.config.console }}
|
||||
pprofPort: {{ .Values.scheduler.config.pprofPort }}
|
||||
tracing:
|
||||
{{ toYaml .Values.scheduler.config.tracing | indent 6 }}
|
||||
verbose: {{ .Values.scheduler.config.verbose }}
|
||||
{{- if .Values.scheduler.config.verbose }}
|
||||
pprof-port: {{ .Values.scheduler.config.pprofPort }}
|
||||
{{- end }}
|
||||
{{- if .Values.scheduler.config.jaeger }}
|
||||
jaeger: {{ .Values.scheduler.config.jaeger }}
|
||||
{{- else if .Values.jaeger.enable }}
|
||||
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: apps/v1
|
|||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
@ -22,12 +22,14 @@ spec:
|
|||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
serviceName: scheduler
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
{{- if .Values.scheduler.podLabels }}
|
||||
{{ toYaml .Values.scheduler.podLabels | indent 8 }}
|
||||
|
@ -38,13 +40,9 @@ spec:
|
|||
{{ toYaml .Values.scheduler.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
hostNetwork: {{ .Values.scheduler.hostNetwork }}
|
||||
{{- if .Values.scheduler.hostNetwork }}
|
||||
dnsPolicy: "ClusterFirstWithHostNet"
|
||||
{{- end }}
|
||||
{{- with .Values.scheduler.nodeSelector | default .Values.global.nodeSelector }}
|
||||
{{- if .Values.scheduler.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{ toYaml .Values.scheduler.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.scheduler.tolerations }}
|
||||
tolerations:
|
||||
|
@ -57,10 +55,10 @@ spec:
|
|||
{{- if quote .Values.scheduler.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.scheduler.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if .Values.scheduler.priorityClassName }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.scheduler.priorityClassName) }}
|
||||
priorityClassName: {{ .Values.scheduler.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.scheduler.image.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
{{- with .Values.scheduler.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
|
@ -70,21 +68,19 @@ spec:
|
|||
{{- end }}
|
||||
initContainers:
|
||||
- name: wait-for-manager
|
||||
image: {{ template "scheduler.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.scheduler.initContainer.image.pullPolicy }}
|
||||
image: {{ .Values.scheduler.initContainer.image }}:{{ .Values.scheduler.initContainer.tag }}
|
||||
imagePullPolicy: {{ .Values.scheduler.initContainer.pullPolicy }}
|
||||
{{- if .Values.manager.enable }}
|
||||
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.manager.restPort }}; do echo waiting for manager; sleep 2; done;']
|
||||
{{- else }}
|
||||
command: ['sh', '-c', 'until nslookup {{ .Values.externalManager.host }} && nc -vz {{ .Values.externalManager.host }} {{ .Values.externalManager.restPort }}; do echo waiting for external manager; sleep 2; done;']
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.scheduler.initContainer.resources | indent 10 }}
|
||||
containers:
|
||||
- name: scheduler
|
||||
image: {{ template "scheduler.image" . }}
|
||||
imagePullPolicy: {{ .Values.scheduler.image.pullPolicy | quote }}
|
||||
image: "{{ .Values.scheduler.image }}:{{ .Values.scheduler.tag }}"
|
||||
imagePullPolicy: {{ .Values.scheduler.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.scheduler.resources | indent 10 }}
|
||||
{{ toYaml .Values.scheduler.resources | indent 12 }}
|
||||
env:
|
||||
{{- if .Values.scheduler.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
@ -20,14 +20,11 @@ spec:
|
|||
type: {{ .Values.scheduler.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.scheduler.config.server.port }}
|
||||
name: grpc
|
||||
appProtocol: grpc
|
||||
name: http-grpc
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.scheduler.config.server.port }}
|
||||
{{- if eq .Values.scheduler.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.scheduler.service.nodePort }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
@ -27,5 +27,6 @@ spec:
|
|||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.scheduler.name }}-metrics
|
||||
{{- end }}
|
||||
|
|
|
@ -1,16 +1,16 @@
|
|||
{{- if .Values.seedClient.metrics.enable }}
|
||||
{{- if and .Values.seedClient.metrics.enable .Values.seedClient.metrics.serviceMonitor.enable }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedClient.fullname" . }}-metrics
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedClient.name }}-metrics
|
||||
{{- if .Values.seedClient.metrics.service.labels }}
|
||||
{{ toYaml .Values.seedClient.metrics.service.labels | indent 4 }}
|
||||
{{ toYaml .Values.metrics.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedClient.metrics.service.annotations }}
|
||||
annotations:
|
||||
|
@ -19,12 +19,12 @@ metadata:
|
|||
spec:
|
||||
type: {{ .Values.seedClient.metrics.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.seedClient.config.metrics.server.port }}
|
||||
- port: {{ .Values.seedClient.config.metrics.port }}
|
||||
name: http-metrics
|
||||
appProtocol: http
|
||||
targetPort: {{ .Values.seedClient.config.metrics.port }}
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.seedClient.config.metrics.server.port }}
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedClient.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedClient.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
|
@ -20,12 +20,13 @@ data:
|
|||
upload:
|
||||
{{ toYaml .Values.seedClient.config.upload | indent 6 }}
|
||||
manager:
|
||||
{{- if .Values.seedClient.config.manager.addrs }}
|
||||
addr: {{ .Values.seedClient.config.manager.addr }}
|
||||
addrs:
|
||||
{{- if .Values.client.config.manager.addrs }}
|
||||
{{ toYaml .Values.client.config.manager.netAddrs | indent 6 }}
|
||||
{{- else if .Values.manager.enable }}
|
||||
addr: http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
|
||||
- http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
|
||||
{{- else }}
|
||||
addr: http://{{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
|
||||
- {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
|
||||
{{- end }}
|
||||
scheduler:
|
||||
{{ toYaml .Values.seedClient.config.scheduler | indent 6 }}
|
||||
|
@ -39,12 +40,14 @@ data:
|
|||
{{ toYaml .Values.seedClient.config.gc | indent 6 }}
|
||||
proxy:
|
||||
{{ toYaml .Values.seedClient.config.proxy | indent 6 }}
|
||||
health:
|
||||
{{ toYaml .Values.seedClient.config.health | indent 6 }}
|
||||
security:
|
||||
{{ toYaml .Values.seedClient.config.security | indent 6 }}
|
||||
metrics:
|
||||
{{ toYaml .Values.seedClient.config.metrics | indent 6 }}
|
||||
stats:
|
||||
{{ toYaml .Values.seedClient.config.stats | indent 6 }}
|
||||
{{- if .Values.seedClient.config.tracing }}
|
||||
tracing:
|
||||
{{ toYaml .Values.seedClient.config.tracing | indent 6 }}
|
||||
{{- else if .Values.jaeger.enable }}
|
||||
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -4,11 +4,11 @@ kind: StatefulSet
|
|||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
name: {{ template "dragonfly.seedClient.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
annotations:
|
||||
{{- if .Values.seedClient.statefulsetAnnotations }}
|
||||
{{ toYaml .Values.seedClient.statefulsetAnnotations | indent 4 }}
|
||||
|
@ -23,12 +23,14 @@ spec:
|
|||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
release: {{ .Release.Name }}
|
||||
serviceName: seed-client
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- if .Values.seedClient.podLabels }}
|
||||
{{ toYaml .Values.seedClient.podLabels | indent 8 }}
|
||||
{{- end }}
|
||||
|
@ -38,13 +40,9 @@ spec:
|
|||
{{ toYaml .Values.seedClient.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
hostNetwork: {{ .Values.seedClient.hostNetwork }}
|
||||
{{- if .Values.seedClient.hostNetwork }}
|
||||
dnsPolicy: "ClusterFirstWithHostNet"
|
||||
{{- end }}
|
||||
{{- with .Values.seedClient.nodeSelector | default .Values.global.nodeSelector }}
|
||||
{{- if .Values.seedClient.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{ toYaml .Values.seedClient.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedClient.tolerations }}
|
||||
tolerations:
|
||||
|
@ -57,10 +55,10 @@ spec:
|
|||
{{- if quote .Values.seedClient.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.seedClient.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedClient.priorityClassName }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.seedClient.priorityClassName) }}
|
||||
priorityClassName: {{ .Values.seedClient.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.seedClient.image.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
{{- with .Values.seedClient.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
|
@ -70,26 +68,24 @@ spec:
|
|||
{{- end }}
|
||||
initContainers:
|
||||
- name: wait-for-manager
|
||||
image: {{ template "seedClient.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.seedClient.initContainer.image.pullPolicy }}
|
||||
image: {{ .Values.seedClient.initContainer.image }}:{{ .Values.seedClient.initContainer.tag }}
|
||||
imagePullPolicy: {{ .Values.seedClient.initContainer.pullPolicy }}
|
||||
{{- if .Values.manager.enable }}
|
||||
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.manager.restPort }}; do echo waiting for manager; sleep 2; done;']
|
||||
{{- else }}
|
||||
command: ['sh', '-c', 'until nslookup {{ .Values.externalManager.host }} && nc -vz {{ .Values.externalManager.host }} {{ .Values.externalManager.restPort }}; do echo waiting for external manager; sleep 2; done;']
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.seedClient.initContainer.resources | indent 10 }}
|
||||
containers:
|
||||
- name: seed-client
|
||||
image: {{ template "seedClient.image" . }}
|
||||
imagePullPolicy: {{ .Values.seedClient.image.pullPolicy | quote }}
|
||||
image: "{{ .Values.seedClient.image }}:{{ .Values.seedClient.tag }}"
|
||||
imagePullPolicy: {{ .Values.seedClient.pullPolicy | quote }}
|
||||
args:
|
||||
- --log-level={{ .Values.client.config.log.level }}
|
||||
{{- if .Values.seedClient.config.console }}
|
||||
- --console
|
||||
{{- if .Values.seedClient.config.verbose }}
|
||||
- --verbose
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.seedClient.resources | indent 10 }}
|
||||
{{ toYaml .Values.seedClient.resources | indent 12 }}
|
||||
env:
|
||||
{{- if .Values.seedClient.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
|
@ -98,26 +94,20 @@ spec:
|
|||
ports:
|
||||
- containerPort: {{ .Values.seedClient.config.upload.server.port }}
|
||||
protocol: TCP
|
||||
- containerPort: {{ .Values.seedClient.config.proxy.server.port }}
|
||||
protocol: TCP
|
||||
- containerPort: {{ .Values.seedClient.config.health.server.port }}
|
||||
protocol: TCP
|
||||
- containerPort: {{ .Values.seedClient.config.metrics.server.port }}
|
||||
protocol: TCP
|
||||
- containerPort: {{ .Values.seedClient.config.stats.server.port }}
|
||||
{{- if .Values.seedClient.metrics.enable }}
|
||||
- containerPort: {{ .Values.seedClient.config.metrics.port }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.seedClient.config.download.server.socketPath }}"]
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.seedClient.config.upload.server.port }}"]
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.seedClient.config.download.server.socketPath }}"]
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.seedClient.config.upload.server.port }}"]
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 10
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: "/etc/dragonfly"
|
||||
|
@ -152,6 +142,12 @@ spec:
|
|||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.seedClient.persistence.size | quote }}
|
||||
{{- include "common.storage.class" (dict "persistence" .Values.seedClient.persistence "global" .Values.global) | nindent 8 }}
|
||||
{{- if .Values.seedClient.persistence.storageClass }}
|
||||
{{- if (eq "-" .Values.seedClient.persistence.storageClass) }}
|
||||
storageClassName: ""
|
||||
{{- else }}
|
||||
storageClassName: "{{ .Values.seedClient.persistence.storageClass }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -1,43 +0,0 @@
|
|||
{{- if .Values.seedClient.enable }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedClient.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
{{- if .Values.seedClient.service.labels }}
|
||||
{{ toYaml .Values.seedClient.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedClient.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.seedClient.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.seedClient.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.seedClient.config.proxy.server.port }}
|
||||
name: http-proxy
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.seedClient.config.proxy.server.port }}
|
||||
- port: {{ .Values.seedClient.config.health.server.port }}
|
||||
name: http-health
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.seedClient.config.health.server.port }}
|
||||
- port: {{ .Values.seedClient.config.stats.server.port }}
|
||||
name: http-stats
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.seedClient.config.stats.server.port }}
|
||||
{{- if eq .Values.seedClient.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.seedClient.service.nodePort }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
{{- end }}
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedClient.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
|
@ -27,5 +27,6 @@ spec:
|
|||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.seedClient.name }}-metrics
|
||||
{{- end }}
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
{{- if and .Values.seedPeer.metrics.enable .Values.seedPeer.metrics.serviceMonitor.enable }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedPeer.fullname" . }}-metrics
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedPeer.name }}-metrics
|
||||
{{- if .Values.seedPeer.metrics.service.labels }}
|
||||
{{ toYaml .Values.metrics.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.metrics.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.seedPeer.metrics.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.seedPeer.metrics.service.type }}
|
||||
ports:
|
||||
- port: 8000
|
||||
name: http-metrics
|
||||
targetPort: 8000
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.seedPeer.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- end }}
|
|
@ -0,0 +1,20 @@
|
|||
{{- if and .Values.seedPeer.metrics.enable .Values.seedPeer.metrics.prometheusRule.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedPeer.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedPeer.name }}
|
||||
{{- if .Values.seedPeer.metrics.prometheusRule.additionalLabels }}
|
||||
{{ toYaml .Values.seedPeer.metrics.prometheusRule.additionalLabels | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
groups:
|
||||
- name: {{ template "dragonfly.seedPeer.fullname" $ }}
|
||||
rules:
|
||||
{{ toYaml .Values.seedPeer.metrics.prometheusRule.rules | indent 8 }}
|
||||
{{- end }}
|
|
@ -0,0 +1,85 @@
|
|||
{{- if .Values.seedPeer.enable }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedPeer.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedPeer.name }}
|
||||
data:
|
||||
dfget.yaml: |-
|
||||
aliveTime: {{ .Values.seedPeer.config.aliveTime }}
|
||||
gcInterval: {{ .Values.seedPeer.config.gcInterval }}
|
||||
keepStorage: {{ .Values.seedPeer.config.keepStorage }}
|
||||
workHome: {{ .Values.seedPeer.config.workHome }}
|
||||
logDir: {{ .Values.seedPeer.config.logDir }}
|
||||
cacheDir: {{ .Values.seedPeer.config.cacheDir }}
|
||||
pluginDir: {{ .Values.seedPeer.config.pluginDir }}
|
||||
dataDir: {{ .Values.seedPeer.config.dataDir }}
|
||||
console: {{ .Values.seedPeer.config.console }}
|
||||
health:
|
||||
{{ toYaml .Values.dfdaemon.config.health | indent 6 }}
|
||||
verbose: {{ .Values.seedPeer.config.verbose }}
|
||||
{{- if .Values.seedPeer.config.verbose }}
|
||||
pprof-port: {{ .Values.seedPeer.config.pprofPort }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.metrics.enable }}
|
||||
metrics: ":8000"
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.config.jaeger }}
|
||||
jaeger: {{ .Values.seedPeer.config.jaeger }}
|
||||
{{- else if .Values.jaeger.enable }}
|
||||
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
|
||||
{{- end }}
|
||||
scheduler:
|
||||
manager:
|
||||
enable: {{ .Values.seedPeer.config.scheduler.manager.enable }}
|
||||
netAddrs:
|
||||
{{- if and (.Values.seedPeer.config.scheduler.manager.enable) (.Values.seedPeer.config.scheduler.manager.netAddrs) }}
|
||||
{{ toYaml .Values.seedPeer.config.scheduler.manager.netAddrs | indent 10 }}
|
||||
{{- else if .Values.manager.enable }}
|
||||
- type: tcp
|
||||
addr: {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
|
||||
{{- else }}
|
||||
- type: tcp
|
||||
addr: {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
|
||||
{{- end }}
|
||||
refreshInterval: {{ .Values.seedPeer.config.scheduler.manager.refreshInterval }}
|
||||
seedPeer:
|
||||
{{ toYaml .Values.seedPeer.config.scheduler.manager.seedPeer | indent 10 }}
|
||||
scheduleTimeout: {{ .Values.seedPeer.config.scheduler.scheduleTimeout }}
|
||||
disableAutoBackSource: {{ .Values.seedPeer.config.scheduler.disableAutoBackSource }}
|
||||
host:
|
||||
{{ toYaml .Values.seedPeer.config.host | indent 6 }}
|
||||
download:
|
||||
{{ toYaml .Values.seedPeer.config.download | indent 6 }}
|
||||
upload:
|
||||
{{ toYaml .Values.seedPeer.config.upload | indent 6 }}
|
||||
storage:
|
||||
{{ toYaml .Values.seedPeer.config.storage | indent 6 }}
|
||||
proxy:
|
||||
defaultFilter: {{ .Values.seedPeer.config.proxy.defaultFilter }}
|
||||
defaultTag: {{ .Values.seedPeer.config.proxy.defaultTag }}
|
||||
tcpListen:
|
||||
{{- if not .Values.seedPeer.hostNetwork }}
|
||||
namespace: {{ .Values.seedPeer.config.proxy.tcpListen.namespace }}
|
||||
{{- end }}
|
||||
port: {{ .Values.seedPeer.containerPort }}
|
||||
security:
|
||||
{{ toYaml .Values.seedPeer.config.proxy.security | indent 8 }}
|
||||
registryMirror:
|
||||
{{ toYaml .Values.seedPeer.config.proxy.registryMirror | indent 8 }}
|
||||
proxies:
|
||||
{{ toYaml .Values.seedPeer.config.proxy.proxies | indent 8 }}
|
||||
objectStorage:
|
||||
{{ toYaml .Values.seedPeer.config.objectStorage | indent 6 }}
|
||||
security:
|
||||
{{ toYaml .Values.seedPeer.config.security | indent 6 }}
|
||||
network:
|
||||
{{ toYaml .Values.seedPeer.config.network | indent 6 }}
|
||||
announcer:
|
||||
{{ toYaml .Values.seedPeer.config.announcer | indent 6 }}
|
||||
{{- end }}
|
|
@ -0,0 +1,157 @@
|
|||
{{- if .Values.seedPeer.enable }}
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedPeer.name }}
|
||||
name: {{ template "dragonfly.seedPeer.fullname" . }}
|
||||
annotations:
|
||||
{{- if .Values.seedPeer.statefulsetAnnotations }}
|
||||
{{ toYaml .Values.seedPeer.statefulsetAnnotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.seedPeer.updateStrategy }}
|
||||
updateStrategy:
|
||||
{{ toYaml .Values.seedPeer.updateStrategy | indent 4 }}
|
||||
{{- end }}
|
||||
replicas: {{ .Values.seedPeer.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.seedPeer.name }}
|
||||
release: {{ .Release.Name }}
|
||||
serviceName: seed-peer
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.seedPeer.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- if .Values.seedPeer.podLabels }}
|
||||
{{ toYaml .Values.seedPeer.podLabels | indent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/seed-peer/seed-peer-configmap.yaml") . | sha256sum }}
|
||||
{{- if .Values.seedPeer.podAnnotations }}
|
||||
{{ toYaml .Values.seedPeer.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.seedPeer.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.seedPeer.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.seedPeer.tolerations | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.affinity }}
|
||||
affinity:
|
||||
{{ toYaml .Values.seedPeer.affinity | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if quote .Values.seedPeer.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.seedPeer.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.seedPeer.priorityClassName) }}
|
||||
priorityClassName: {{ .Values.seedPeer.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.seedPeer.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.hostAliases }}
|
||||
hostAliases:
|
||||
{{ toYaml .Values.seedPeer.hostAliases | indent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: wait-for-manager
|
||||
image: {{ .Values.seedPeer.initContainer.image }}:{{ .Values.seedPeer.initContainer.tag }}
|
||||
imagePullPolicy: {{ .Values.seedPeer.initContainer.pullPolicy }}
|
||||
{{- if .Values.manager.enable }}
|
||||
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.manager.restPort }}; do echo waiting for manager; sleep 2; done;']
|
||||
{{- else }}
|
||||
command: ['sh', '-c', 'until nslookup {{ .Values.externalManager.host }} && nc -vz {{ .Values.externalManager.host }} {{ .Values.externalManager.restPort }}; do echo waiting for external manager; sleep 2; done;']
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: seed-peer
|
||||
image: "{{ .Values.seedPeer.image }}:{{ .Values.seedPeer.tag }}"
|
||||
imagePullPolicy: {{ .Values.seedPeer.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.seedPeer.resources | indent 12 }}
|
||||
env:
|
||||
{{- if .Values.seedPeer.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
value: {{ .Values.seedPeer.maxProcs }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.seedPeer.config.download.peerGRPC.tcpListen.port }}
|
||||
protocol: TCP
|
||||
- containerPort: {{ .Values.seedPeer.config.upload.tcpListen.port }}
|
||||
protocol: TCP
|
||||
{{- if .Values.seedPeer.config.objectStorage.enable }}
|
||||
- containerPort: {{ .Values.seedPeer.config.objectStorage.tcpListen.port }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.metrics.enable }}
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: "/etc/dragonfly"
|
||||
- name: storage
|
||||
mountPath: {{ .Values.seedPeer.config.dataDir }}
|
||||
{{- if .Values.seedPeer.extraVolumeMounts }}
|
||||
{{- toYaml .Values.seedPeer.extraVolumeMounts | nindent 8 }}
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.seedPeer.config.download.peerGRPC.tcpListen.port }}"]
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.seedPeer.config.download.peerGRPC.tcpListen.port }}"]
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 10
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ template "dragonfly.seedPeer.fullname" $ }}
|
||||
items:
|
||||
- key: dfget.yaml
|
||||
path: dfget.yaml
|
||||
{{- if not (.Values.seedPeer.persistence.enable) }}
|
||||
- name: storage
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.extraVolumes }}
|
||||
{{- toYaml .Values.seedPeer.extraVolumes | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.persistence.enable }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: storage
|
||||
{{- range $key, $value := .Values.seedPeer.persistence.annotations }}
|
||||
{{ $key }}: {{ $value }}
|
||||
{{- end }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.seedPeer.persistence.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.seedPeer.persistence.size | quote }}
|
||||
{{- if .Values.seedPeer.persistence.storageClass }}
|
||||
{{- if (eq "-" .Values.seedPeer.persistence.storageClass) }}
|
||||
storageClassName: ""
|
||||
{{- else }}
|
||||
storageClassName: "{{ .Values.seedPeer.persistence.storageClass }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,32 @@
|
|||
{{- if and .Values.seedPeer.metrics.enable .Values.seedPeer.metrics.serviceMonitor.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedPeer.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedPeer.name }}
|
||||
{{- if .Values.seedPeer.metrics.serviceMonitor.additionalLabels }}
|
||||
{{ toYaml .Values.seedPeer.metrics.serviceMonitor.additionalLabels | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- port: http-metrics
|
||||
{{- if .Values.seedPeer.metrics.serviceMonitor.interval }}
|
||||
interval: {{ .Values.seedPeer.metrics.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedPeer.metrics.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ .Values.seedPeer.metrics.serviceMonitor.scrapeTimeout }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.seedPeer.name }}-metrics
|
||||
{{- end }}
|
|
@ -0,0 +1,30 @@
|
|||
{{- if .Values.trainer.metrics.enable }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.trainer.fullname" . }}-metrics
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.trainer.name }}-metrics
|
||||
{{- if .Values.trainer.metrics.service.labels }}
|
||||
{{ toYaml .Values.metrics.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.trainer.metrics.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.trainer.metrics.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.trainer.metrics.service.type }}
|
||||
ports:
|
||||
- port: 8000
|
||||
name: http-metrics
|
||||
targetPort: 8000
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.trainer.name }}
|
||||
{{- end }}
|
|
@ -0,0 +1,20 @@
|
|||
{{- if and .Values.trainer.metrics.enable .Values.trainer.metrics.prometheusRule.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.trainer.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.trainer.name }}
|
||||
{{- if .Values.trainer.metrics.prometheusRule.additionalLabels }}
|
||||
{{ toYaml .Values.trainer.metrics.prometheusRule.additionalLabels | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
groups:
|
||||
- name: {{ template "dragonfly.trainer.fullname" $ }}
|
||||
rules:
|
||||
{{ toYaml .Values.trainer.metrics.prometheusRule.rules | indent 8 }}
|
||||
{{- end }}
|
|
@ -0,0 +1,32 @@
|
|||
{{- if and .Values.trainer.metrics.enable .Values.trainer.metrics.serviceMonitor.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.trainer.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.trainer.name }}
|
||||
{{- if .Values.trainer.metrics.serviceMonitor.additionalLabels }}
|
||||
{{ toYaml .Values.trainer.metrics.serviceMonitor.additionalLabels | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- port: http-metrics
|
||||
{{- if .Values.trainer.metrics.serviceMonitor.interval }}
|
||||
interval: {{ .Values.trainer.metrics.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
{{- if .Values.trainer.metrics.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ .Values.trainer.metrics.serviceMonitor.scrapeTimeout }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.trainer.name }}-metrics
|
||||
{{- end }}
|
|
@ -0,0 +1,35 @@
|
|||
{{- if .Values.trainer.enable }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.trainer.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.trainer.name }}
|
||||
data:
|
||||
trainer.yaml: |-
|
||||
server:
|
||||
{{ toYaml .Values.trainer.config.server | indent 6 }}
|
||||
security:
|
||||
{{ toYaml .Values.trainer.config.security | indent 6 }}
|
||||
network:
|
||||
{{ toYaml .Values.trainer.config.network | indent 6 }}
|
||||
manager:
|
||||
{{ toYaml .Values.trainer.config.manager | indent 6 }}
|
||||
metrics:
|
||||
enable: {{ .Values.trainer.metrics.enable }}
|
||||
addr: ":8000"
|
||||
console: {{ .Values.trainer.config.console }}
|
||||
verbose: {{ .Values.trainer.config.verbose }}
|
||||
{{- if .Values.trainer.config.verbose }}
|
||||
pprof-port: {{ .Values.trainer.config.pprofPort }}
|
||||
{{- end }}
|
||||
{{- if .Values.trainer.config.jaeger }}
|
||||
jaeger: {{ .Values.trainer.config.jaeger }}
|
||||
{{- else if .Values.jaeger.enable }}
|
||||
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,111 @@
|
|||
{{- if .Values.trainer.enable }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.trainer.name }}
|
||||
name: {{ template "dragonfly.trainer.fullname" . }}
|
||||
annotations:
|
||||
{{- if .Values.trainer.deploymentAnnotations }}
|
||||
{{ toYaml .Values.trainer.deploymentAnnotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.trainer.updateStrategy }}
|
||||
strategy: {{- toYaml .Values.trainer.updateStrategy | nindent 4 }}
|
||||
{{- end }}
|
||||
replicas: {{ .Values.trainer.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.trainer.name }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.trainer.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- if .Values.trainer.podLabels }}
|
||||
{{ toYaml .Values.trainer.podLabels | indent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/trainer/trainer-configmap.yaml") . | sha256sum }}
|
||||
{{- if .Values.trainer.podAnnotations }}
|
||||
{{ toYaml .Values.trainer.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.trainer.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.trainer.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.trainer.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.trainer.tolerations | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.trainer.affinity }}
|
||||
affinity:
|
||||
{{ toYaml .Values.trainer.affinity | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if quote .Values.trainer.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.trainer.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.trainer.priorityClassName) }}
|
||||
priorityClassName: {{ .Values.trainer.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.trainer.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.trainer.hostAliases }}
|
||||
hostAliases:
|
||||
{{ toYaml .Values.trainer.hostAliases | indent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: trainer
|
||||
image: "{{ .Values.trainer.image }}:{{ .Values.trainer.tag }}"
|
||||
imagePullPolicy: {{ .Values.trainer.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.trainer.resources | indent 12 }}
|
||||
env:
|
||||
{{- if .Values.trainer.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
value: {{ .Values.trainer.maxProcs }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.trainer.containerPort }}
|
||||
protocol: TCP
|
||||
{{- if .Values.trainer.metrics.enable }}
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: "/etc/dragonfly"
|
||||
{{- if .Values.trainer.extraVolumeMounts }}
|
||||
{{- toYaml .Values.trainer.extraVolumeMounts | nindent 8 }}
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.trainer.containerPort }}"]
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.trainer.containerPort }}"]
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 10
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ template "dragonfly.trainer.fullname" . }}
|
||||
items:
|
||||
- key: trainer.yaml
|
||||
path: trainer.yaml
|
||||
{{- if .Values.trainer.extraVolumes }}
|
||||
{{- toYaml .Values.trainer.extraVolumes | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,30 @@
|
|||
{{- if .Values.trainer.enable }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.trainer.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.trainer.name }}
|
||||
{{- if .Values.trainer.service.labels }}
|
||||
{{ toYaml .Values.trainer.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.trainer.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.trainer.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.trainer.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.trainer.config.server.port }}
|
||||
name: http-grpc
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.trainer.config.server.port }}
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.trainer.name }}
|
||||
{{- end }}
|
|
@ -0,0 +1,75 @@
|
|||
{{- if .Values.triton.enable }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "dragonfly.triton.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.triton.name }}
|
||||
spec:
|
||||
replicas: {{ .Values.triton.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.triton.name }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.triton.name }}
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
containers:
|
||||
- name: triton
|
||||
image: "{{ .Values.triton.image }}:{{ .Values.triton.tag}}"
|
||||
imagePullPolicy: {{ .Values.triton.pullPolicy | quote }}
|
||||
args: ["tritonserver", "--model-store={{ .Values.triton.modelRepositoryPath }}",
|
||||
"--model-control-mode=poll",
|
||||
"--repository-poll-secs=5"]
|
||||
env:
|
||||
{{- if .Values.triton.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
value: {{ .Values.triton.maxProcs }}
|
||||
{{- end }}
|
||||
- name: DEFAULT_REGION
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ include "dragonfly.triton.fullname" . }}-credentials
|
||||
key: region
|
||||
- name: AWS_ACCESS_KEY_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ include "dragonfly.triton.fullname" . }}-credentials
|
||||
key: accessKeyID
|
||||
- name: AWS_SECRET_ACCESS_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ include "dragonfly.triton.fullname" . }}-credentials
|
||||
key: secretAccessKey
|
||||
ports:
|
||||
- containerPort: {{ .Values.triton.restPort }}
|
||||
name: http-rest
|
||||
protocol: TCP
|
||||
- containerPort: {{ .Values.triton.grpcPort }}
|
||||
name: http-grpc
|
||||
protocol: TCP
|
||||
- containerPort: 8002
|
||||
name: http-metrics
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /v2/health/ready
|
||||
port: http-rest
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /v2/health/live
|
||||
port: http-rest
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 10
|
||||
{{- end}}
|
|
@ -0,0 +1,11 @@
|
|||
{{- if .Values.triton.enable }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ template "dragonfly.triton.fullname" . }}-credentials
|
||||
type: Opaque
|
||||
data:
|
||||
region: {{ .Values.triton.aws.region | b64enc | quote }}
|
||||
accessKeyID: {{ .Values.triton.aws.accessKeyID | b64enc | quote }}
|
||||
secretAccessKey: {{ .Values.triton.aws.secretAccessKey | b64enc | quote }}
|
||||
{{- end}}
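The Secret above base64-encodes three fields read from `.Values.triton.aws`. A minimal values sketch that would populate it; the region and both credential strings are placeholders:

```yaml
# Illustrative values for the Triton credentials Secret; replace the placeholders.
triton:
  enable: true
  aws:
    region: us-west-2
    accessKeyID: "<access-key-id>"
    secretAccessKey: "<secret-access-key>"
```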
|
|
@ -0,0 +1,27 @@
|
|||
{{- if .Values.triton.enable }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.triton.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.triton.name }}
|
||||
spec:
|
||||
type: {{ .Values.triton.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.triton.restPort }}
|
||||
name: http-rest
|
||||
protocol: TCP
|
||||
targetPort: http-rest
|
||||
- port: {{ .Values.triton.grpcPort }}
|
||||
name: http-grpc
|
||||
protocol: TCP
|
||||
targetPort: http-grpc
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.triton.name }}
|
||||
{{- end}}
|
File diff suppressed because it is too large
|
@ -3,7 +3,7 @@ name: nydus-snapshotter
|
|||
description: Nydus snapshotter is an external plugin of containerd for the Nydus image service, which implements a chunk-based content-addressable filesystem on top of a format called RAFS.
|
||||
icon: https://github.com/dragonflyoss/image-service/raw/master/misc/logo.svg
|
||||
type: application
|
||||
version: 0.0.10
|
||||
version: 0.0.8
|
||||
appVersion: 0.9.0
|
||||
keywords:
|
||||
- nydus
|
||||
|
@ -31,7 +31,7 @@ sources:
|
|||
|
||||
annotations:
|
||||
artifacthub.io/changes: |
|
||||
- Change the default port of Dragonfly.
|
||||
- Change maintainers to nydus maintainers.
|
||||
artifacthub.io/links: |
|
||||
- name: Chart Source
|
||||
url: https://github.com/dragonflyoss/helm-charts
|
||||
|
|
|
@ -58,37 +58,29 @@ helm delete nydus-snapshotter --namespace nydus-snapshotter
|
|||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| args | list | `[]` | Args to overwrite default nydus-snapshotter startup command |
|
||||
| containerRuntime | object | `{"containerd":{"configFile":"/etc/containerd/config.toml","enable":true},"initContainer":{"image":{"pullPolicy":"Always","registry":"ghcr.io","repository":"liubin/toml-cli","tag":"v0.0.7"}}}` | [Experimental] Container runtime support Choose special container runtime in Kubernetes. Support: Containerd, Docker, CRI-O |
|
||||
| containerRuntime | object | `{"containerd":{"configFile":"/etc/containerd/config.toml","enable":true},"initContainerImage":"ghcr.io/liubin/toml-cli:v0.0.7"}` | [Experimental] Container runtime support Choose special container runtime in Kubernetes. Support: Containerd, Docker, CRI-O |
|
||||
| containerRuntime.containerd | object | `{"configFile":"/etc/containerd/config.toml","enable":true}` | [Experimental] Containerd support |
|
||||
| containerRuntime.containerd.configFile | string | `"/etc/containerd/config.toml"` | Custom config path directory, default is /etc/containerd/config.toml |
|
||||
| containerRuntime.containerd.enable | bool | `true` | Enable containerd support Inject nydus-snapshotter config into ${containerRuntime.containerd.configFile}, |
|
||||
| containerRuntime.initContainer.image.pullPolicy | string | `"Always"` | Image pull policy. |
|
||||
| containerRuntime.initContainer.image.registry | string | `"ghcr.io"` | Image registry. |
|
||||
| containerRuntime.initContainer.image.repository | string | `"liubin/toml-cli"` | Image repository. |
|
||||
| containerRuntime.initContainer.image.tag | string | `"v0.0.7"` | Image tag. |
|
||||
| containerRuntime.initContainerImage | string | `"ghcr.io/liubin/toml-cli:v0.0.7"` | The image name of the init container, used only to update the container runtime configuration file |
|
||||
| daemonsetAnnotations | object | `{}` | Daemonset annotations |
|
||||
| dragonfly.enable | bool | `true` | Enable dragonfly |
|
||||
| dragonfly.mirrorConfig[0].auth_through | bool | `false` | |
|
||||
| dragonfly.mirrorConfig[0].headers.X-Dragonfly-Registry | string | `"https://index.docker.io"` | |
|
||||
| dragonfly.mirrorConfig[0].host | string | `"http://127.0.0.1:4001"` | |
|
||||
| dragonfly.mirrorConfig[0].ping_url | string | `"http://127.0.0.1:4003/healthy"` | |
|
||||
| global.imagePullSecrets | list | `[]` | Global Docker registry secret names as an array. |
|
||||
| global.imageRegistry | string | `""` | Global Docker image registry. |
|
||||
| global.nodeSelector | object | `{}` | Global node labels for pod assignment. |
|
||||
| dragonfly.mirrorConfig[0].host | string | `"http://127.0.0.1:65001"` | |
|
||||
| dragonfly.mirrorConfig[0].ping_url | string | `"http://127.0.0.1:40901/server/ping"` | |
|
||||
| hostAliases | list | `[]` | Host Aliases |
|
||||
| hostNetwork | bool | `true` | Let nydus-snapshotter run in host network |
|
||||
| hostPid | bool | `true` | Let nydus-snapshotter use the host's pid namespace |
|
||||
| image.pullPolicy | string | `"Always"` | Image pull policy. |
|
||||
| image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
|
||||
| image.registry | string | `"ghcr.io"` | Image registry. |
|
||||
| image.repository | string | `"containerd/nydus-snapshotter"` | Image repository. |
|
||||
| image.tag | string | `"v0.9.0"` | Image tag. |
|
||||
| image | string | `"ghcr.io/containerd/nydus-snapshotter"` | Image repository |
|
||||
| name | string | `"nydus-snapshotter"` | nydus-snapshotter name |
|
||||
| nodeSelector | object | `{}` | Node labels for pod assignment |
|
||||
| podAnnotations | object | `{}` | Pod annotations |
|
||||
| podLabels | object | `{}` | Pod labels |
|
||||
| priorityClassName | string | `""` | Pod priorityClassName |
|
||||
| pullPolicy | string | `"Always"` | Image pull policy |
|
||||
| resources | object | `{"limits":{"cpu":"2","memory":"2Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits |
|
||||
| tag | string | `"v0.9.0"` | Image tag |
|
||||
| terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds |
|
||||
| tolerations | list | `[]` | List of node taints to tolerate |
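To make the value layout documented in the table above concrete, here is a hedged example of a values override using the split `registry`/`repository`/`tag` keys and the structured init-container image; every value shown simply repeats the documented defaults:

```yaml
# Example values.yaml fragment mirroring the defaults listed in the table above.
image:
  registry: ghcr.io
  repository: containerd/nydus-snapshotter
  tag: v0.9.0
  pullPolicy: Always
  pullSecrets: []
containerRuntime:
  initContainer:
    image:
      registry: ghcr.io
      repository: liubin/toml-cli
      tag: v0.0.7
      pullPolicy: Always
```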
|
||||
|
||||
|
|
|
@ -18,43 +18,3 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
|
|||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Return the proper image name
|
||||
{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
|
||||
*/}}
|
||||
{{- define "common.images.image" -}}
|
||||
{{- $registryName := .imageRoot.registry -}}
|
||||
{{- $repositoryName := .imageRoot.repository -}}
|
||||
{{- $separator := ":" -}}
|
||||
{{- $termination := .imageRoot.tag | toString -}}
|
||||
{{- if .global }}
|
||||
{{- if .global.imageRegistry }}
|
||||
{{- $registryName = .global.imageRegistry -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- if .imageRoot.digest }}
|
||||
{{- $separator = "@" -}}
|
||||
{{- $termination = .imageRoot.digest | toString -}}
|
||||
{{- end -}}
|
||||
{{- if $registryName }}
|
||||
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s%s%s" $repositoryName $separator $termination -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the nydus-snapshotter)
|
||||
*/}}
|
||||
{{- define "nydus-snapshotter.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the nydus-snapshotter)
|
||||
*/}}
|
||||
{{- define "nydus-snapshotter.initContainer.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.containerRuntime.initContainer.image "global" .Values.global ) -}}
|
||||
{{- end -}}
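Given that values layout, the `common.images.image` helper concatenates registry, repository, and tag (or digest), with `global.imageRegistry` taking precedence over the per-image registry. A short sketch of the strings it produces:

```yaml
# Usage inside a template, as in the daemonset:
#   image: {{ template "nydus-snapshotter.image" . }}
# Rendered with the defaults (registry ghcr.io, repository containerd/nydus-snapshotter, tag v0.9.0):
#   ghcr.io/containerd/nydus-snapshotter:v0.9.0
# With global.imageRegistry set to registry.example.com (an assumed override):
#   registry.example.com/containerd/nydus-snapshotter:v0.9.0
# If image.digest is set, the ":" separator becomes "@" and the digest replaces the tag.
```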
|
||||
|
|
|
@ -53,18 +53,14 @@ spec:
|
|||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.priorityClassName) }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.image.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.hostAliases }}
|
||||
hostAliases:
|
||||
{{ toYaml .Values.hostAliases | indent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: nydus-snapshotter
|
||||
image: {{ template "nydus-snapshotter.image" . }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
image: "{{ .Values.image }}:{{ .Values.tag }}"
|
||||
imagePullPolicy: {{ .Values.pullPolicy | quote }}
|
||||
env:
|
||||
- name: ENABLE_NYDUS_OVERLAY
|
||||
value: "false"
|
||||
|
@ -103,8 +99,8 @@ spec:
|
|||
initContainers:
|
||||
{{- if .Values.containerRuntime.containerd.enable }}
|
||||
- name: update-containerd
|
||||
image: {{ template "nydus-snapshotter.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.containerRuntime.initContainer.image.pullPolicy }}
|
||||
image: "{{ .Values.containerRuntime.initContainerImage }}"
|
||||
imagePullPolicy: {{ .Values.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
command:
|
||||
|
|
|
@ -1,27 +1,13 @@
|
|||
# nydus-snapshotter Helm Chart Values
|
||||
#
|
||||
global:
|
||||
# -- Global Docker image registry.
|
||||
imageRegistry: ""
|
||||
# -- Global Docker registry secret names as an array.
|
||||
imagePullSecrets: []
|
||||
# -- Global node labels for pod assignment.
|
||||
nodeSelector: {}
|
||||
|
||||
# -- nydus-snapshotter name
|
||||
name: nydus-snapshotter
|
||||
image:
|
||||
# -- Image registry.
|
||||
registry: ghcr.io
|
||||
# -- Image repository.
|
||||
repository: containerd/nydus-snapshotter
|
||||
# -- Image tag.
|
||||
tag: v0.9.0
|
||||
# -- Image pull policy.
|
||||
pullPolicy: Always
|
||||
# -- Image pull secrets.
|
||||
# @default -- `[]` (defaults to global.imagePullSecrets).
|
||||
pullSecrets: []
|
||||
# -- Image repository
|
||||
image: ghcr.io/containerd/nydus-snapshotter
|
||||
# -- Image tag
|
||||
tag: v0.9.0
|
||||
# -- Image pull policy
|
||||
pullPolicy: Always
|
||||
# -- Let nydus-snapshotter run in host network
|
||||
hostNetwork: true
|
||||
# -- Let nydus-snapshotter use the host's pid namespace
|
||||
|
@ -57,27 +43,19 @@ dragonfly:
|
|||
# -- Enable dragonfly
|
||||
enable: true
|
||||
mirrorConfig:
|
||||
- host: "http://127.0.0.1:4001"
|
||||
- host: "http://127.0.0.1:65001"
|
||||
auth_through: false
|
||||
headers:
|
||||
"X-Dragonfly-Registry": "https://index.docker.io"
|
||||
ping_url: "http://127.0.0.1:4003/healthy"
|
||||
ping_url: "http://127.0.0.1:40901/server/ping"
|
||||
|
||||
|
||||
# -- [Experimental] Container runtime support
|
||||
# Choose special container runtime in Kubernetes.
|
||||
# Support: Containerd, Docker, CRI-O
|
||||
containerRuntime:
|
||||
initContainer:
|
||||
image:
|
||||
# -- Image registry.
|
||||
registry: ghcr.io
|
||||
# -- Image repository.
|
||||
repository: liubin/toml-cli
|
||||
# -- Image tag.
|
||||
tag: v0.0.7
|
||||
# -- Image pull policy.
|
||||
pullPolicy: Always
|
||||
# -- The image name of the init container, used only to update the container runtime configuration file
|
||||
initContainerImage: ghcr.io/liubin/toml-cli:v0.0.7
|
||||
|
||||
# -- [Experimental] Containerd support
|
||||
containerd:
|
||||