Compare commits
106 Commits
dragonfly-
...
main
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 1f55fc587a |  |
|  | 7589dfc9a6 |  |
|  | f36a6f9315 |  |
|  | 7799a87578 |  |
|  | 62969f3fea |  |
|  | bab9530d46 |  |
|  | 5832e5018f |  |
|  | 7afe3b697e |  |
|  | d0feef12e6 |  |
|  | 05f5ff0fab |  |
|  | cac34d36b5 |  |
|  | 2d9deea5a8 |  |
|  | 6fc6c790e7 |  |
|  | 2ca22a98c8 |  |
|  | 19079041bd |  |
|  | aadd5d2df6 |  |
|  | 181cf5538d |  |
|  | 7379918c08 |  |
|  | 3aca321f93 |  |
|  | 8aceed5453 |  |
|  | 121b88310d |  |
|  | 705d24205f |  |
|  | 7575fc21ff |  |
|  | b4e9af677d |  |
|  | c4eeee7da3 |  |
|  | 0c99ab572b |  |
|  | 2c3758a843 |  |
|  | 8ea1d0e504 |  |
|  | a6d532c80d |  |
|  | ea231c7d22 |  |
|  | 6eb1281cac |  |
|  | a372ff3e1d |  |
|  | bcfe18f3b6 |  |
|  | c88374e561 |  |
|  | 6fa8aacc91 |  |
|  | 0918609d83 |  |
|  | 0255d07468 |  |
|  | a317036999 |  |
|  | fb9e47648e |  |
|  | fddd0a4e65 |  |
|  | 5515b4b909 |  |
|  | 67e1dbe3c1 |  |
|  | b4adfbb26d |  |
|  | 5802ff5bef |  |
|  | 9cbd3cee51 |  |
|  | 20f441c4a1 |  |
|  | 7e4362d1d9 |  |
|  | 5350cf07a6 |  |
|  | cda720a35c |  |
|  | ce4da5592c |  |
|  | 53caf8c98d |  |
|  | 68d7a9b01a |  |
|  | 0c635cbaae |  |
|  | df98c72611 |  |
|  | a4811ea014 |  |
|  | a36ebca04b |  |
|  | 3dd667710b |  |
|  | d8a281101c |  |
|  | 3c763a369a |  |
|  | 891ea66d71 |  |
|  | 4b8744ce63 |  |
|  | 4a31c5e694 |  |
|  | 5870303429 |  |
|  | 889421e7de |  |
|  | 2a80a007d7 |  |
|  | 6c67110503 |  |
|  | d9c9564c5c |  |
|  | 4872c786fa |  |
|  | 240d603106 |  |
|  | 822515f336 |  |
|  | eb5560e4f0 |  |
|  | b02430c9f1 |  |
|  | 75569c5588 |  |
|  | 4322fe6ba5 |  |
|  | 90d0b38933 |  |
|  | 8eb6a9dc6c |  |
|  | 10ece2757d |  |
|  | 95afabc513 |  |
|  | 41d0623c93 |  |
|  | 1e25a0d05a |  |
|  | a86156e4dd |  |
|  | dbaba913f1 |  |
|  | 33aaa834dc |  |
|  | 7ba32e517d |  |
|  | 3ad2a41161 |  |
|  | 3feb953b89 |  |
|  | 2658b8d27c |  |
|  | b686794ac4 |  |
|  | 2cd739924b |  |
|  | 01fe885bbf |  |
|  | 28f4644ff8 |  |
|  | 737620ce84 |  |
|  | 0336db9621 |  |
|  | 18d3b6e8e1 |  |
|  | edaac8f8ae |  |
|  | 3a10e770dc |  |
|  | e58ae6b5c7 |  |
|  | 2b0224477a |  |
|  | 867657ef07 |  |
|  | 33bcd6f83a |  |
|  | 712477b6c9 |  |
|  | 1b5581ab37 |  |
|  | beee42afdc |  |
|  | 5795ed22eb |  |
|  | 961385739e |  |
|  | ed435b375c |  |
@@ -0,0 +1,17 @@
# Set to true to add reviewers to pull requests
addReviewers: true

# Set to true to add assignees to pull requests
addAssignees: author

# A list of reviewers to be added to pull requests (GitHub user name)
reviewers:
  - gaius-qi
  - yxxhero
  - chlins
  - CormickKneey
  - imeoer
  - BraveY

# A number of reviewers added to the pull request
numberOfReviewers: 3
@@ -0,0 +1,11 @@
name: "Auto Assign"

on:
  pull_request_target:
    types: [opened, reopened, ready_for_review]

jobs:
  add-assignee:
    runs-on: ubuntu-latest
    steps:
      - uses: kentaro-m/auto-assign-action@9f6dbe84a80c6e7639d1b9698048b201052a2a94
@@ -43,10 +43,10 @@ jobs:

      - uses: actions/setup-python@v5
        with:
          python-version: 3.7
          python-version: 3.9

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.1
        uses: helm/chart-testing-action@v2.7.0

      - name: Run chart-testing (list-changed)
        id: list-changed
@@ -60,14 +60,17 @@ jobs:
        run: ct lint --config ./.github/ct.yaml

      - name: Create kind cluster
        uses: helm/kind-action@v1.10.0
        uses: helm/kind-action@v1.12.0
        if: steps.list-changed.outputs.changed == 'true'

      - name: Add bitnami chart repos
        run: helm repo add bitnami https://charts.bitnami.com/bitnami

      - name: Add jaegertracing chart repos
        run: helm repo add jaegertracing https://jaegertracing.github.io/helm-charts
      - name: Add dragonfly chart repos
        run: helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/

      - name: Add nydus-snapshotter chart repos
        run: helm repo add nydus-snapshotter https://dragonflyoss.github.io/helm-charts/

      - name: Run chart-testing (install)
        run: ct install --config ./.github/ct.yaml
@@ -0,0 +1,20 @@
name: PR Label

on:
  pull_request:
    types: [opened, labeled, unlabeled, synchronize]

permissions:
  contents: read

jobs:
  classify:
    name: Classify PR
    runs-on: ubuntu-latest
    steps:
      - name: PR impact specified
        uses: mheap/github-action-required-labels@8afbe8ae6ab7647d0c9f0cfa7c2f939650d22509 # v5.5
        with:
          mode: exactly
          count: 1
          labels: 'bug, enhancement, documentation, dependencies'
@@ -5,6 +5,7 @@ on:
    branches:
      - main
    paths:
      - charts/dragonfly-stack/Chart.yaml
      - charts/dragonfly/Chart.yaml
      - charts/nydus-snapshotter/Chart.yaml

@@ -23,6 +24,6 @@ jobs:
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.6.0
        uses: helm/chart-releaser-action@v1.7.0
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
@@ -0,0 +1,31 @@
name: Close stale issues and PRs

on:
  workflow_dispatch:
  schedule:
    - cron: "0 0 * * *"

permissions:
  issues: write
  pull-requests: write

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
        id: stale
        with:
          delete-branch: true
          days-before-close: 7
          days-before-stale: 90
          days-before-pr-close: 7
          days-before-pr-stale: 120
          stale-issue-label: "stale"
          exempt-issue-labels: bug,wip,on-hold
          exempt-pr-labels: bug,wip,on-hold
          exempt-all-milestones: true
          stale-issue-message: 'This issue is stale because it has been open 90 days with no activity.'
          close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.'
          stale-pr-message: 'This PR is stale because it has been open 120 days with no activity.'
          close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.'
INSTALL.md
@@ -22,7 +22,7 @@ This document will help you experience how to use [Dragonfly](https://d7y.io) &
Download containerd configuration for kind.

```shell
curl -fsSL -o config.toml https://raw.githubusercontent.com/dragonflyoss/Dragonfly2/main/test/testdata/containerd/config.toml
curl -fsSL -o config.toml https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/test/testdata/containerd/config.toml
```

Create the kind cluster configuration file `kind-config.yaml` with the following content:

@@ -37,8 +37,8 @@ nodes:
  - role: control-plane
    image: kindest/node:v1.23.17
    extraPortMappings:
      - containerPort: 65001
        hostPort: 65001
      - containerPort: 4001
        hostPort: 4001
        protocol: TCP
    extraMounts:
      - hostPath: ./config.toml
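For reference, a minimal sketch of the complete `kind-config.yaml` that these hunks excerpt. The `kind`/`apiVersion` header and the `containerPath` of the mount are assumptions, since they are not shown in the hunk above:

```yaml
# Sketch of kind-config.yaml: header and containerPath are assumed; ports follow the new hunk lines.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
    image: kindest/node:v1.23.17
    extraPortMappings:
      - containerPort: 4001
        hostPort: 4001
        protocol: TCP
    extraMounts:
      - hostPath: ./config.toml
        containerPath: /etc/containerd/config.toml # assumed mount target for the downloaded config.toml
```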
@@ -83,7 +83,7 @@ Install Dragonfly using the configuration:

```shell
$ helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set dfdaemon.config.download.prefetch=true,seedPeer.config.download.prefetch=true
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set client.config.proxy.prefetch=true,seedClient.config.proxy.prefetch=true
NAME: dragonfly
LAST DEPLOYED: Fri Apr 7 10:35:12 2023
NAMESPACE: dragonfly-system
@@ -97,12 +97,7 @@ NOTES:
kubectl --namespace dragonfly-system port-forward $SCHEDULER_POD_NAME 8002:$SCHEDULER_CONTAINER_PORT
echo "Visit http://127.0.0.1:8002 to use your scheduler"

2. Get the dfdaemon port by running these commands:
export DFDAEMON_POD_NAME=$(kubectl get pods --namespace dragonfly-system -l "app=dragonfly,release=dragonfly,component=dfdaemon" -o jsonpath={.items[0].metadata.name})
export DFDAEMON_CONTAINER_PORT=$(kubectl get pod --namespace dragonfly-system $DFDAEMON_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
You can use $DFDAEMON_CONTAINER_PORT as a proxy port in Node.

3. Configure runtime to use dragonfly:
2. Configure runtime to use dragonfly:
https://d7y.io/docs/getting-started/quick-start/kubernetes/
```
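If you prefer a values file over `--set`, the two flags in the updated install command above map to the following keys (a sketch; the file name `prefetch-values.yaml` is only an example) and can be passed with `-f prefetch-values.yaml`:

```yaml
# prefetch-values.yaml (example name): equivalent of the two --set flags above.
client:
  config:
    proxy:
      prefetch: true
seedClient:
  config:
    proxy:
      prefetch: true
```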
@@ -112,7 +107,7 @@ Check that Dragonfly is deployed successfully:

```shell
$ kubectl wait po --all -n dragonfly-system --for=condition=ready --timeout=10m
pod/dragonfly-dfdaemon-gs924 condition met
pod/dragonfly-client-gs924 condition met
pod/dragonfly-manager-5d97fd88fb-txnw9 condition met
pod/dragonfly-manager-5d97fd88fb-v2nmh condition met
pod/dragonfly-manager-5d97fd88fb-xg6wr condition met
@@ -124,9 +119,9 @@ pod/dragonfly-redis-replicas-2 condition met
pod/dragonfly-scheduler-0 condition met
pod/dragonfly-scheduler-1 condition met
pod/dragonfly-scheduler-2 condition met
pod/dragonfly-seed-peer-0 condition met
pod/dragonfly-seed-peer-1 condition met
pod/dragonfly-seed-peer-2 condition met
pod/dragonfly-seed-client-0 condition met
pod/dragonfly-seed-client-1 condition met
pod/dragonfly-seed-client-2 condition met
```

## Install Nydus based on Helm Charts
@@ -137,7 +132,7 @@ Install Nydus using the default configuration, for more information about mirror
<!-- markdownlint-disable -->

```shell
$ curl -fsSL -o config-nydus.yaml https://raw.githubusercontent.com/dragonflyoss/Dragonfly2/main/test/testdata/charts/config-nydus.yaml
$ curl -fsSL -o config-nydus.yaml https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/test/testdata/charts/config-nydus.yaml
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace nydus-snapshotter nydus-snapshotter dragonfly/nydus-snapshotter -f config-nydus.yaml
NAME: nydus-snapshotter
LAST DEPLOYED: Fri Apr 7 10:40:50 2023
@@ -215,15 +210,9 @@ Verify downloaded Nydus image via Dragonfly based on mirror mode:
<!-- markdownlint-disable -->

```shell
$ DFDAEMON_POD_NAME=`kubectl -n dragonfly-system get pod -l component=dfdaemon --no-headers -o custom-columns=NAME:metadata.name`
$ kubectl -n dragonfly-system exec -it ${DFDAEMON_POD_NAME} -- sh -c 'grep "peer task done" /var/log/dragonfly/daemon/core.log'
{"level":"info","ts":"2023-04-10 07:30:57.596","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 1116ms","peer":"10.244.0.5-1-53419631-8a14-4325-b5f2-c4ef01a02853","task":"d6a7aaa926dccd3376f91378f58d3a1a0871302d0afee718fd991a6849b422a7","component":"PeerTask","trace":"977c114a06b6d3a12fc680b28b57a43d"}
{"level":"info","ts":"2023-04-10 07:30:58.594","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 2116ms","peer":"10.244.0.5-1-4c45ed29-4931-4cfc-a8e7-ba06a7575518","task":"984629e0ba47eeccd65ffea34d1369d71bb821169c83918795cceb4e9774d3eb","component":"PeerTask","trace":"e9249680e787c9a13935aee1b280665a"}
{"level":"info","ts":"2023-04-10 07:30:58.598","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 2133ms","peer":"10.244.0.5-1-45e3cd5b-cac6-43f0-be82-398cab978e83","task":"571f792ad3e2b12cc28407f8f14d17a44925e0151aff947773bdac5bec64b8d6","component":"PeerTask","trace":"f4e79e09ac293603875b9542c9b24bb4"}
{"level":"info","ts":"2023-04-10 07:30:58.905","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 2377ms","peer":"10.244.0.5-1-6d51916a-13cb-4e50-8ba0-886e786e32eb","task":"023b961410d8776250215268f3569fa4ccb01bf1c557ca0e73888c4dd8c23ace","component":"PeerTask","trace":"285f5ecf084873e4311526136438d571"}
{"level":"info","ts":"2023-04-10 07:30:59.452","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 538ms","peer":"10.244.0.5-1-b7b2235f-4b0f-4253-8a1f-cdf7bd86f096","task":"23dee111679d459440e4839200940534037f1ba101bd7b7af57c9b7123f96882","component":"PeerTask","trace":"63d5147c7bd01455ce3c537f18463b12"}
{"level":"info","ts":"2023-04-10 07:31:01.722","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 4123ms","peer":"10.244.0.5-1-0dbbfe12-df46-4e3b-98dc-fa6c8f2a514c","task":"15c51bc09cf57b4c5c1c04e9cbdf17fa4560c6ad10b5b32680f0b8cd63bb900b","component":"PeerTask","trace":"b9bcac5bfe5d1f1871db22911d7d71b5"}
{"level":"info","ts":"2023-04-10 07:31:02.897","caller":"peer/peertask_conductor.go:1330","msg":"peer task done, cost: 6483ms","peer":"10.244.0.5-1-be485ea5-6d54-4f56-8f56-bdbe76ec8469","task":"0fe34e3fcb64d49b09fe7c759f47a373b7590fe4dbe1da6d9c732eee516e4cb4","component":"PeerTask","trace":"daa2ffd1021779dfbd3162ead765e0ba"}
$ CLIENT_POD_NAME=`kubectl -n dragonfly-system get pod -l component=client --no-headers -o custom-columns=NAME:metadata.name`
$ kubectl -n dragonfly-system exec -it ${CLIENT_POD_NAME} -- sh -c 'grep "download task succeeded" /var/log/dragonfly/dfdaemon/dfdaemon.log'
2024-05-28T12:36:24.861903Z INFO download_task: dragonfly-client/src/grpc/dfdaemon_download.rs:276: download task succeeded host_id="127.0.0.1-kind-worker" task_id="4535f073321f0d1908b8c3ad63a1d59324573c0083961c5bcb7f38ac72ad598d" peer_id="127.0.0.1-kind-worker-13095fb5-786a-4908-b8c1-744be144b383"
```

<!-- markdownlint-restore -->
@@ -246,7 +235,7 @@ Create Dragonfly configuration file `d7y-config.yaml`, configuration content is

```shell
cat <<EOF > d7y-config.yaml
seedPeer:
seedClient:
  persistence:
    storageClass: "alicloud-disk-essd"
    size: 20Gi
@@ -277,7 +266,7 @@ Install Dragonfly using the params:

```shell
$ helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set dfdaemon.config.download.prefetch=true,seedPeer.config.download.prefetch=true -f d7y-config.yaml
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set client.config.proxy.prefetch=true,seedClient.config.proxy.prefetch=true -f d7y-config.yaml
NAME: dragonfly
LAST DEPLOYED: Fri Apr 7 10:35:12 2023
NAMESPACE: dragonfly-system
@@ -291,12 +280,7 @@ NOTES:
kubectl --namespace dragonfly-system port-forward $SCHEDULER_POD_NAME 8002:$SCHEDULER_CONTAINER_PORT
echo "Visit http://127.0.0.1:8002 to use your scheduler"

2. Get the dfdaemon port by running these commands:
export DFDAEMON_POD_NAME=$(kubectl get pods --namespace dragonfly-system -l "app=dragonfly,release=dragonfly,component=dfdaemon" -o jsonpath={.items[0].metadata.name})
export DFDAEMON_CONTAINER_PORT=$(kubectl get pod --namespace dragonfly-system $DFDAEMON_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
You can use $DFDAEMON_CONTAINER_PORT as a proxy port in Node.

3. Configure runtime to use dragonfly:
2. Configure runtime to use dragonfly:
https://d7y.io/docs/getting-started/quick-start/kubernetes/
```
@@ -306,7 +290,7 @@ Check that Dragonfly is deployed successfully:

```shell
$ kubectl wait po --all -n dragonfly-system --for=condition=ready --timeout=10m
pod/dragonfly-dfdaemon-gs924 condition met
pod/dragonfly-client-gs924 condition met
pod/dragonfly-manager-5d97fd88fb-txnw9 condition met
pod/dragonfly-manager-5d97fd88fb-v2nmh condition met
pod/dragonfly-manager-5d97fd88fb-xg6wr condition met
@@ -318,7 +302,7 @@ pod/dragonfly-redis-replicas-2 condition met
pod/dragonfly-scheduler-0 condition met
pod/dragonfly-scheduler-1 condition met
pod/dragonfly-scheduler-2 condition met
pod/dragonfly-seed-peer-0 condition met
pod/dragonfly-seed-peer-1 condition met
pod/dragonfly-seed-peer-2 condition met
pod/dragonfly-seed-client-0 condition met
pod/dragonfly-seed-client-1 condition met
pod/dragonfly-seed-client-2 condition met
```
@@ -1,5 +1,6 @@
# Dragonfly Community Helm Charts

[](https://artifacthub.io/packages/helm/dragonfly/dragonfly-stack)
[](https://artifacthub.io/packages/helm/dragonfly/dragonfly)
[](https://artifacthub.io/packages/helm/dragonfly/nydus-snapshotter)
@@ -20,18 +21,19 @@ Please refer to the [document][install] to install Dragonfly & Nydus on Kubernet

## Documentation

- [Install Dragonfly & Nydus on Kubernetes][install]
- [Install Dragonfly Stack on Kubernetes](./charts/dragonfly-stack/README.md)
- [Install Dragonfly on Kubernetes](./charts/dragonfly/README.md)
- [Install Nydus on Kubernetes](./charts/nydus-snapshotter/README.md)
- [Install Dragonfly & Nydus on Kubernetes][install]

## Community

Join the conversation and help the community.

- **Slack Channel**: [#dragonfly](https://cloud-native.slack.com/messages/dragonfly/) on [CNCF Slack](https://slack.cncf.io/)
- **Discussion Group**: <dragonfly-discuss@googlegroups.com>
- **Github Discussions**: [Dragonfly Discussion Forum](https://github.com/dragonflyoss/dragonfly/discussions)
- **Developer Group**: <dragonfly-developers@googlegroups.com>
- **Github Discussions**: [Dragonfly Discussion Forum][discussion]
- **Maintainer Group**: <dragonfly-maintainers@googlegroups.com>
- **Twitter**: [@dragonfly_oss](https://twitter.com/dragonfly_oss)
- **DingTalk**: [22880028764](https://qr.dingtalk.com/action/joingroup?code=v1,k1,pkV9IbsSyDusFQdByPSK3HfCG61ZCLeb8b/lpQ3uUqI=&_dt_no_comment=1&origin=11)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -0,0 +1,54 @@
apiVersion: v2
name: dragonfly-stack
description: Collects Dragonfly component and Nydus component into a single chart to provide a complete solution for the Dragonfly stack.
icon: https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/docs/images/logo/dragonfly.svg
type: application
version: 0.1.2
appVersion: 2.1.49
keywords:
  - dragonfly-stack
  - dragonfly
  - nydus
  - d7y
  - P2P
  - image

maintainers:
  - name: gaius-qi
    email: gaius.qi@gmail.com
  - name: imeoer
    email: imeoer@gmail.com
  - name: adamqqqplay
    email: adamqqq@163.com

home: https://d7y.io/

sources:
  - https://github.com/dragonflyoss/dragonfly
  - https://github.com/dragonflyoss/client
  - https://github.com/dragonflyoss/image-service
  - https://github.com/containerd/nydus-snapshotter/

annotations:
  artifacthub.io/changes: |
    - Rename repo Dragonfly2 to dragonfly.

  artifacthub.io/links: |
    - name: Chart Source
      url: https://github.com/dragonflyoss/helm-charts
    - name: Source
      url: https://github.com/dragonflyoss/dragonfly
    - name: Source
      url: https://github.com/dragonflyoss/client
    - name: Source
      url: https://github.com/containerd/nydus-snapshotter

dependencies:
  - name: dragonfly
    version: 1.1.67
    repository: https://dragonflyoss.github.io/helm-charts/
    condition: dragonfly.enable
  - name: nydus-snapshotter
    version: 0.0.10
    repository: https://dragonflyoss.github.io/helm-charts/
    condition: nydus-snapshotter.enable
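Because of the `condition` fields on the two dependencies above, either subchart can be switched off from the parent values. A minimal sketch, where the keys simply mirror the `dragonfly.enable` and `nydus-snapshotter.enable` conditions declared above:

```yaml
# values.yaml sketch for dragonfly-stack: install only the Dragonfly subchart.
dragonfly:
  enable: true
nydus-snapshotter:
  enable: false
```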
File diff suppressed because one or more lines are too long
@@ -0,0 +1,133 @@
# Dragonfly Stack Helm Chart

[](https://artifacthub.io/packages/search?repo=dragonfly-stack)

Collects Dragonfly component and Nydus component into a single chart to provide a complete solution for the Dragonfly stack.

## TL;DR

```shell
helm repo add dragonfly-stack https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly-stack/dragonfly-stack
```

## Introduction

Dragonfly is an open source intelligent P2P based image and file distribution system. Its goal is to tackle all distribution problems in cloud native scenarios. Currently Dragonfly focuses on being:

- Simple: well-defined user-facing API (HTTP), non-invasive to all container engines;
- Efficient: Seed peer support, P2P based file distribution to save enterprise bandwidth;
- Intelligent: host level speed limit, intelligent flow control due to host detection;
- Secure: block transmission encryption, HTTPS connection support.

Dragonfly is now hosted by the Cloud Native Computing Foundation (CNCF) as an Incubating Level Project. Originally it was born to solve all kinds of distribution at very large scales, such as application distribution, cache distribution, log distribution, image distribution, and so on.

## Prerequisites

- Kubernetes cluster 1.20+
- Helm v3.8.0+

## Installation Guide

When using Dragonfly in Kubernetes, a container runtime must be configured. This work can be done by the init script in this chart.

More detail about the installation is available in [Kubernetes with Dragonfly](https://d7y.io/docs/getting-started/quick-start/kubernetes/).

We recommend reading the details in [Kubernetes with Dragonfly](https://d7y.io/docs/getting-started/quick-start/kubernetes/) before installing.

> **We do not recommend using Dragonfly with Docker in Kubernetes** for several reasons: 1. there is no fallback image pulling policy; 2. it is deprecated in Kubernetes.
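As a concrete illustration of the init step mentioned above: the dragonfly chart exposes `client.dfinit.*` values (they appear in the values table further down in this compare). A sketch of enabling them for the stack chart, where Dragonfly values are nested under `dragonfly`:

```yaml
# Sketch: let dfinit rewrite the containerd configuration on the nodes.
# Keys follow client.dfinit.* from the dragonfly chart values, nested under `dragonfly` for dragonfly-stack.
dragonfly:
  client:
    dfinit:
      enable: true
      restartContainerRuntime: true
```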
## Installation

### Install with custom configuration

Create the `values.yaml` configuration file. It is recommended to use external Redis and MySQL instead of containers. This example uses external MySQL and Redis.

```yaml
dragonfly:
  mysql:
    enable: false

  externalMysql:
    migrate: true
    host: mysql-host
    username: dragonfly
    password: dragonfly
    database: manager
    port: 3306

  redis:
    enable: false

  externalRedis:
    addrs:
      - redis.example.com:6379
    password: dragonfly
```

Install the dragonfly-stack chart with release name `dragonfly`:

```shell
helm repo add dragonfly-stack https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly-stack/dragonfly-stack -f values.yaml
```

### Install with an existing manager

Create the `values.yaml` configuration file. You need to configure the cluster ID associated with the scheduler and seed peer. This example deploys a cluster using an existing manager and Redis.

```yaml
dragonfly:
  scheduler:
    config:
      manager:
        schedulerClusterID: 1

  seedClient:
    config:
      seedPeer:
        enable: true
        type: super
        clusterID: 1

  manager:
    enable: false

  externalManager:
    enable: true
    host: "dragonfly-manager.dragonfly-system.svc.cluster.local"
    restPort: 8080
    grpcPort: 65003

  redis:
    enable: false

  externalRedis:
    addrs:
      - redis.example.com:6379
    password: dragonfly

  mysql:
    enable: false
```

Install the dragonfly-stack chart with release name `dragonfly`:

```shell
helm repo add dragonfly-stack https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly-stack/dragonfly-stack -f values.yaml
```

## Uninstall

Uninstall the `dragonfly` deployment:

```shell
helm delete dragonfly --namespace dragonfly-system
```

{{ template "chart.valuesSection" . }}

## Chart dependencies

{{ template "chart.requirementsTable" . }}
File diff suppressed because it is too large
@@ -1,10 +1,10 @@
apiVersion: v2
name: dragonfly
description: Dragonfly is an intelligent P2P based image and file distribution system
icon: https://raw.githubusercontent.com/dragonflyoss/Dragonfly2/main/docs/images/logo/dragonfly.svg
icon: https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/docs/images/logo/dragonfly.svg
type: application
version: 1.1.61
appVersion: 2.1.44
version: 1.4.4
appVersion: 2.3.1-rc.2
keywords:
  - dragonfly
  - d7y

@@ -12,56 +12,49 @@ keywords:
  - image

maintainers:
  - name: jim3ma
    email: majinjing3@gmail.com
  - name: gaius-qi
    email: gaius.qi@gmail.com
  - name: yxxhero
    email: aiopsclub@163.com
  - name: jim3ma
    email: majinjing3@gmail.com

home: https://d7y.io/

sources:
  - https://github.com/dragonflyoss/Dragonfly2
  - https://github.com/dragonflyoss/dragonfly
  - https://github.com/dragonflyoss/client

annotations:
  artifacthub.io/changes: |
    - Optimize default params for concurrent downloading.
    - Update client version to v0.1.62.
    - Bump Dragonfly to v2.3.1-rc.2.
    - Bump Client to v1.0.9.

  artifacthub.io/links: |
    - name: Chart Source
      url: https://github.com/dragonflyoss/helm-charts
    - name: Source
      url: https://github.com/dragonflyoss/Dragonfly2
      url: https://github.com/dragonflyoss/dragonfly
    - name: Source
      url: https://github.com/dragonflyoss/client
  artifacthub.io/images: |
    - name: manager
      image: dragonflyoss/manager:v2.1.44
      image: dragonflyoss/manager:v2.3.1-rc.2
    - name: scheduler
      image: dragonflyoss/scheduler:v2.1.44
      image: dragonflyoss/scheduler:v2.3.1-rc.2
    - name: client
      image: dragonflyoss/client:v0.1.62
      image: dragonflyoss/client:v1.0.9
    - name: seed-client
      image: dragonflyoss/client:v0.1.62
      image: dragonflyoss/client:v1.0.9
    - name: dfinit
      image: dragonflyoss/dfinit:v0.1.62
    - name: dfdaemon
      image: dragonflyoss/dfdaemon:v2.1.44
    - name: trainer
      image: dragonflyoss/scheduler:v2.1.44
    - name: triton
      image: nvcr.io/nvidia/tritonserver:23.06-py3
      image: dragonflyoss/dfinit:v1.0.9

dependencies:
  - name: mysql
    version: 9.4.6
    version: 10.1.1
    repository: https://charts.bitnami.com/bitnami
    condition: mysql.enable
  - name: redis
    version: 17.4.3
    version: 19.5.5
    repository: https://charts.bitnami.com/bitnami
    condition: redis.enable
  - name: jaeger
    version: 0.66.1
    repository: https://jaegertracing.github.io/helm-charts
    condition: jaeger.enable
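To show how the version bump above is consumed, here is a hypothetical parent-chart dependency entry pinning the new release; it mirrors the dependency block of the dragonfly-stack chart earlier in this compare, only with the bumped version:

```yaml
# Hypothetical parent Chart.yaml dependency pinning the bumped dragonfly chart.
dependencies:
  - name: dragonfly
    version: 1.4.4
    repository: https://dragonflyoss.github.io/helm-charts/
    condition: dragonfly.enable
```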
@@ -81,13 +81,12 @@ scheduler:
manager:
schedulerClusterID: 1

seedPeer:
seedClient:
config:
scheduler:
manager:
seedPeer:
enable: true
clusterID: 1
seedPeer:
enable: true
type: super
clusterID: 1

manager:
enable: false
@@ -129,54 +128,62 @@ helm delete dragonfly --namespace dragonfly-system
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| client.config.download.concurrentPieceCount | int | `16` | concurrentPieceCount is the number of concurrent pieces to download. |
| client.config.download.pieceTimeout | string | `"30s"` | pieceTimeout is the timeout for downloading a piece from source. |
| client.config.download.rateLimit | int | `20000000000` | rateLimit is the default rate limit of the download speed in bps(bytes per second), default is 20Gbps. |
| client.config.console | bool | `true` | console prints log. |
| client.config.download.collectedPieceTimeout | string | `"10s"` | collected_piece_timeout is the timeout for collecting one piece from the parent in the stream. |
| client.config.download.concurrentPieceCount | int | `8` | concurrentPieceCount is the number of concurrent pieces to download. |
| client.config.download.pieceTimeout | string | `"40s"` | pieceTimeout is the timeout for downloading a piece from source. |
| client.config.download.rateLimit | string | `"50GiB"` | rateLimit is the default rate limit of the download speed in GiB/Mib/Kib per second, default is 50GiB/s. |
| client.config.download.server.requestRateLimit | int | `4000` | request_rate_limit is the rate limit of the download request in the download grpc server, default is 4000 req/s. |
| client.config.download.server.socketPath | string | `"/var/run/dragonfly/dfdaemon.sock"` | socketPath is the unix socket path for dfdaemon GRPC service. |
| client.config.dynconfig.refreshInterval | string | `"5m"` | refreshInterval is the interval to refresh dynamic configuration from manager. |
| client.config.gc.interval | string | `"900s"` | interval is the interval to do gc. |
| client.config.gc.policy.distHighThresholdPercent | int | `80` | distHighThresholdPercent is the high threshold percent of the disk usage. If the disk usage is greater than the threshold, dfdaemon will do gc. |
| client.config.gc.policy.distLowThresholdPercent | int | `60` | distLowThresholdPercent is the low threshold percent of the disk usage. If the disk usage is less than the threshold, dfdaemon will stop gc. |
| client.config.gc.policy.taskTTL | string | `"168h"` | taskTTL is the ttl of the task. |
| client.config.gc.policy.taskTTL | string | `"720h"` | taskTTL is the ttl of the task. |
| client.config.health.server.port | int | `4003` | port is the port to the health server. |
| client.config.host | object | `{"idc":"","location":""}` | host is the host configuration for dfdaemon. |
| client.config.log.level | string | `"info"` | Specify the logging level [trace, debug, info, warn, error] |
| client.config.manager.addrs | list | `[]` | addrs is manager addresses. |
| client.config.manager.addr | string | `""` | addr is manager address. |
| client.config.metrics.server.port | int | `4002` | port is the port to the metrics server. |
| client.config.proxy.disableBackToSource | bool | `false` | disableBackToSource indicates whether disable to download back-to-source when download failed. |
| client.config.proxy.prefetch | bool | `false` | prefetch pre-downloads full of the task when download with range request. |
| client.config.proxy.readBufferSize | int | `32768` | readBufferSize is the buffer size for reading piece from disk, default is 32KB. |
| client.config.proxy.prefetch | bool | `true` | prefetch pre-downloads full of the task when download with range request. `X-Dragonfly-Prefetch` header's priority is higher than prefetch in config. If the value is "true", the range request will prefetch the entire file. If the value is "false", the range request will fetch the range content. |
| client.config.proxy.prefetchRateLimit | string | `"5GiB"` | prefetchRateLimit is the rate limit of prefetching in GiB/Mib/Kib per second, default is 5GiB/s. The prefetch request has lower priority so limit the rate to avoid occupying the bandwidth impact other download tasks. |
| client.config.proxy.readBufferSize | int | `4194304` | readBufferSize is the buffer size for reading piece from disk, default is 4MiB. |
| client.config.proxy.registryMirror.addr | string | `"https://index.docker.io"` | addr is the default address of the registry mirror. Proxy will start a registry mirror service for the client to pull the image. The client can use the default address of the registry mirror in configuration to pull the image. The `X-Dragonfly-Registry` header can instead of the default address of registry mirror. |
| client.config.proxy.rules | list | `[{"regex":"blobs/sha256.*"}]` | rules is the list of rules for the proxy server. regex is the regex of the request url. useTLS indicates whether use tls for the proxy backend. redirect is the redirect url. filteredQueryParams is the filtered query params to generate the task id. When filter is ["Signature", "Expires", "ns"], for example: http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io will generate the same task id. Default value includes the filtered query params of s3, gcs, oss, obs, cos. |
| client.config.proxy.rules | list | `[{"regex":"blobs/sha256.*"}]` | rules is the list of rules for the proxy server. regex is the regex of the request url. useTLS indicates whether use tls for the proxy backend. redirect is the redirect url. filteredQueryParams is the filtered query params to generate the task id. When filter is ["Signature", "Expires", "ns"], for example: http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io will generate the same task id. Default value includes the filtered query params of s3, gcs, oss, obs, cos. `X-Dragonfly-Use-P2P` header can instead of the regular expression of the rule. If the value is "true", the request will use P2P technology to distribute the content. If the value is "false", but url matches the regular expression in rules. The request will also use P2P technology to distribute the content. |
| client.config.proxy.server.port | int | `4001` | port is the port to the proxy server. |
| client.config.scheduler.announceInterval | string | `"5m"` | announceInterval is the interval to announce peer to the scheduler. Announcer will provide the scheduler with peer information for scheduling, peer information includes cpu, memory, etc. |
| client.config.scheduler.enableBackToSource | bool | `true` | enableBackToSource indicates whether enable back-to-source download, when the scheduling failed. |
| client.config.scheduler.maxScheduleCount | int | `5` | maxScheduleCount is the max count of schedule. |
| client.config.scheduler.scheduleTimeout | string | `"30s"` | scheduleTimeout is the timeout for scheduling. If the scheduling timesout, dfdaemon will back-to-source download if enableBackToSource is true, otherwise dfdaemon will return download failed. |
| client.config.security.enable | bool | `false` | enable indicates whether enable security. |
| client.config.scheduler.scheduleTimeout | string | `"3h"` | scheduleTimeout is timeout for the scheduler to respond to a scheduling request from dfdaemon, default is 3 hours. If the scheduler's response time for a scheduling decision exceeds this timeout, dfdaemon will encounter a `TokioStreamElapsed(Elapsed(()))` error. Behavior upon timeout: - If `enable_back_to_source` is `true`, dfdaemon will attempt to download directly from the source. - Otherwise (if `enable_back_to_source` is `false`), dfdaemon will report a download failure. **Important Considerations Regarding Timeout Triggers**: This timeout isn't solely for the scheduler's direct response. It can also be triggered if the overall duration of the client's interaction with the scheduler for a task (e.g., client downloading initial pieces and reporting their status back to the scheduler) exceeds `schedule_timeout`. During such client-side processing and reporting, the scheduler might be awaiting these updates before sending its comprehensive scheduling response, and this entire period is subject to the `schedule_timeout`. **Configuration Guidance**: To prevent premature timeouts, `schedule_timeout` should be configured to a value greater than the maximum expected time for the *entire scheduling interaction*. This includes: 1. The scheduler's own processing and response time. 2. The time taken by the client to download any initial pieces and download all pieces finished, as this communication is part of the scheduling phase. Setting this value too low can lead to `TokioStreamElapsed` errors even if the network and scheduler are functioning correctly but the combined interaction time is longer than the configured timeout. |
| client.config.server.cacheDir | string | `"/var/cache/dragonfly/dfdaemon/"` | cacheDir is the directory to store cache files. |
| client.config.server.pluginDir | string | `"/var/lib/dragonfly/plugins/dfdaemon/"` | pluginDir is the directory to store plugins. |
| client.config.server.pluginDir | string | `"/usr/local/lib/dragonfly/plugins/dfdaemon/"` | pluginDir is the directory to store plugins. |
| client.config.stats.server.port | int | `4004` | port is the port to the stats server. |
| client.config.storage.dir | string | `"/var/lib/dragonfly/"` | dir is the directory to store task's metadata and content. |
| client.config.storage.readBufferSize | int | `131072` | readBufferSize is the buffer size for reading piece from disk, default is 128KB. |
| client.config.storage.writeBufferSize | int | `131072` | writeBufferSize is the buffer size for writing piece to disk, default is 128KB. |
| client.config.upload.rateLimit | int | `20000000000` | rateLimit is the default rate limit of the upload speed in bps(bytes per second), default is 20Gbps. |
| client.config.storage.keep | bool | `true` | keep indicates whether keep the task's metadata and content when the dfdaemon restarts. |
| client.config.storage.readBufferSize | int | `4194304` | readBufferSize is the buffer size for reading piece from disk, default is 4MiB. |
| client.config.storage.writeBufferSize | int | `4194304` | writeBufferSize is the buffer size for writing piece to disk, default is 4MiB. |
| client.config.storage.writePieceTimeout | string | `"30s"` | writePieceTimeout is the timeout for writing a piece to storage(e.g., disk or cache). |
| client.config.tracing.protocol | string | `""` | Protocol specifies the communication protocol for the tracing server. Supported values: "http", "https", "grpc" (default: None). This determines how tracing logs are transmitted to the server. |
| client.config.upload.disableShared | bool | `false` | disableShared indicates whether disable to share data with other peers. |
| client.config.upload.rateLimit | string | `"50GiB"` | rateLimit is the default rate limit of the upload speed in GiB/Mib/Kib per second, default is 50GiB/s. |
| client.config.upload.server.port | int | `4000` | port is the port to the grpc server. |
| client.config.verbose | bool | `false` | verbose prints log. |
| client.config.upload.server.requestRateLimit | int | `4000` | request_rate_limit is the rate limit of the upload request in the upload grpc server, default is 4000 req/s. |
| client.dfinit.config.console | bool | `true` | console prints log. |
| client.dfinit.config.containerRuntime.containerd.configPath | string | `"/etc/containerd/config.toml"` | configPath is the path of containerd configuration file. |
| client.dfinit.config.containerRuntime.containerd.registries | list | `[{"capabilities":["pull","resolve"],"hostNamespace":"docker.io","serverAddr":"https://index.docker.io"},{"capabilities":["pull","resolve"],"hostNamespace":"ghcr.io","serverAddr":"https://ghcr.io"}]` | registries is the list of containerd registries. hostNamespace is the location where container images and artifacts are sourced, refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#registry-host-namespace. The registry host namespace portion is [registry_host_name|IP address][:port], such as docker.io, ghcr.io, gcr.io, etc. serverAddr specifies the default server for this registry host namespace, refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#server-field. capabilities is the list of capabilities in containerd configuration, refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#capabilities-field. |
| client.dfinit.config.containerRuntime.containerd.registries | list | `[{"capabilities":["pull","resolve"],"hostNamespace":"docker.io","serverAddr":"https://index.docker.io","skipVerify":true},{"capabilities":["pull","resolve"],"hostNamespace":"ghcr.io","serverAddr":"https://ghcr.io","skipVerify":true}]` | registries is the list of containerd registries. hostNamespace is the location where container images and artifacts are sourced, refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#registry-host-namespace. The registry host namespace portion is [registry_host_name|IP address][:port], such as docker.io, ghcr.io, gcr.io, etc. serverAddr specifies the default server for this registry host namespace, refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#server-field. capabilities is the list of capabilities in containerd configuration, refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#capabilities-field. skip_verify is the flag to skip verifying the server's certificate, refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#bypass-tls-verification-example. ca (Certificate Authority Certification) can be set to a path or an array of paths each pointing to a ca file for use in authenticating with the registry namespace, refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#ca-field. |
| client.dfinit.config.log.level | string | `"info"` | Specify the logging level [trace, debug, info, warn, error] |
| client.dfinit.config.proxy.addr | string | `"http://127.0.0.1:4001"` | addr is the proxy server address of dfdaemon. |
| client.dfinit.config.verbose | bool | `true` | verbose prints log. |
| client.dfinit.enable | bool | `false` | Enable dfinit to override configuration of container runtime. |
| client.dfinit.image.digest | string | `""` | Image digest. |
| client.dfinit.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
| client.dfinit.image.registry | string | `"docker.io"` | Image registry. |
| client.dfinit.image.repository | string | `"dragonflyoss/dfinit"` | Image repository. |
| client.dfinit.image.tag | string | `"v0.1.62"` | Image tag. |
| client.enable | bool | `false` | Enable client. |
| client.dfinit.image.tag | string | `"v1.0.9"` | Image tag. |
| client.dfinit.restartContainerRuntime | bool | `true` | restartContainerRuntime indicates whether to restart container runtime when dfinit is enabled. it should be set to true when your first install dragonfly. If non-hot load configuration changes are made, the container runtime needs to be restarted. |
| client.enable | bool | `true` | Enable client. |
| client.extraVolumeMounts | list | `[{"mountPath":"/var/lib/dragonfly/","name":"storage"},{"mountPath":"/var/log/dragonfly/dfdaemon/","name":"logs"}]` | Extra volumeMounts for dfdaemon. |
| client.extraVolumes | list | `[{"hostPath":{"path":"/var/lib/dragonfly/","type":"DirectoryOrCreate"},"name":"storage"},{"emptyDir":{},"name":"logs"}]` | Extra volumes for dfdaemon. |
| client.extraVolumes | list | `[{"emptyDir":{},"name":"storage"},{"emptyDir":{},"name":"logs"}]` | Extra volumes for dfdaemon. |
| client.fullnameOverride | string | `""` | Override scheduler fullname. |
| client.hostAliases | list | `[]` | Host Aliases. |
| client.hostIPC | bool | `true` | hostIPC specify if host IPC should be enabled for peer pod. |
@@ -187,14 +194,15 @@ helm delete dragonfly --namespace dragonfly-system
| client.image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
| client.image.registry | string | `"docker.io"` | Image registry. |
| client.image.repository | string | `"dragonflyoss/client"` | Image repository. |
| client.image.tag | string | `"v0.1.62"` | Image tag. |
| client.image.tag | string | `"v1.0.9"` | Image tag. |
| client.initContainer.image.digest | string | `""` | Image digest. |
| client.initContainer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
| client.initContainer.image.registry | string | `"docker.io"` | Image registry. |
| client.initContainer.image.repository | string | `"busybox"` | Image repository. |
| client.initContainer.image.tag | string | `"latest"` | Image tag. |
| client.initContainer.resources | object | `{"limits":{"cpu":"2","memory":"4Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
| client.maxProcs | string | `""` | maxProcs Limits the number of operating system threads that can execute user-level. Go code simultaneously by setting GOMAXPROCS environment variable, refer to https://golang.org/pkg/runtime. |
| client.metrics.enable | bool | `false` | Enable client metrics. |
| client.metrics.enable | bool | `true` | Enable client metrics. |
| client.metrics.prometheusRule.additionalLabels | object | `{}` | Additional labels. |
| client.metrics.prometheusRule.enable | bool | `false` | Enable prometheus rule ref: https://github.com/coreos/prometheus-operator. |
| client.metrics.prometheusRule.rules | list | `[{"alert":"ClientDown","annotations":{"message":"Client instance {{ \"{{ $labels.instance }}\" }} is down","summary":"Client instance is down"},"expr":"sum(dragonfly_client_version{container=\"client\"}) == 0","for":"5m","labels":{"severity":"critical"}},{"alert":"ClientHighNumberOfFailedDownloadTask","annotations":{"message":"Client has a high number of failed download task","summary":"Client has a high number of failed download task"},"expr":"sum(irate(dragonfly_client_download_task_failure_total{container=\"client\"}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"ClientSuccessRateOfDownloadingTask","annotations":{"message":"Client's success rate of downloading task is low","summary":"Client's success rate of downloading task is low"},"expr":"(sum(rate(dragonfly_client_download_task_total{container=\"client\"}[1m])) - sum(rate(dragonfly_client_download_task_failure_total{container=\"client\"}[1m]))) / sum(rate(dragonfly_client_download_task_total{container=\"client\"}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}}]` | Prometheus rules. |
@@ -211,145 +219,12 @@ helm delete dragonfly --namespace dragonfly-system
| client.podAnnotations | object | `{}` | Pod annotations. |
| client.podLabels | object | `{}` | Pod labels. |
| client.priorityClassName | string | `""` | Pod priorityClassName. |
| client.resources | object | `{"limits":{"cpu":"2","memory":"4Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
| client.resources | object | `{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
| client.statefulsetAnnotations | object | `{}` | Statefulset annotations. |
| client.terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds. |
| client.tolerations | list | `[]` | List of node taints to tolerate. |
| client.updateStrategy | object | `{}` | Update strategy for replicas. |
| client.updateStrategy | object | `{"rollingUpdate":{"maxSurge":0,"maxUnavailable":20},"type":"RollingUpdate"}` | Update strategy for replicas. |
| clusterDomain | string | `"cluster.local"` | Install application cluster domain. |
| containerRuntime | object | `{"containerd":{"configFileName":"","configPathDir":"/etc/containerd","enable":false,"injectConfigPath":false,"injectRegistryCredencials":{"auth":"","enable":false,"identitytoken":"","password":"","username":""},"registries":["https://ghcr.io","https://quay.io","https://harbor.example.com:8443"]},"crio":{"enable":false,"registries":["https://ghcr.io","https://quay.io","https://harbor.example.com:8443"]},"docker":{"caCert":{"commonName":"Dragonfly Authority CA","countryName":"CN","localityName":"Hangzhou","organizationName":"Dragonfly","stateOrProvinceName":"Hangzhou"},"enable":false,"injectHosts":true,"insecure":false,"registryDomains":["ghcr.io","quay.io"],"registryPorts":[443],"restart":false,"skipHosts":["127.0.0.1","docker.io"]},"extraInitContainers":[],"initContainer":{"image":{"digest":"","pullPolicy":"IfNotPresent","registry":"docker.io","repository":"dragonflyoss/openssl","tag":"latest"}}}` | [Experimental] Container runtime support. Choose special container runtime in Kubernetes. Support: Containerd, Docker, CRI-O. |
| containerRuntime.containerd | object | `{"configFileName":"","configPathDir":"/etc/containerd","enable":false,"injectConfigPath":false,"injectRegistryCredencials":{"auth":"","enable":false,"identitytoken":"","password":"","username":""},"registries":["https://ghcr.io","https://quay.io","https://harbor.example.com:8443"]}` | [Experimental] Containerd support. |
| containerRuntime.containerd.configFileName | string | `""` | Custom config file name, default is config.toml. This is workaround for kops provider, see https://github.com/kubernetes/kops/pull/13090 for more details. |
| containerRuntime.containerd.configPathDir | string | `"/etc/containerd"` | Custom config path directory, default is /etc/containerd. e.g. rke2 generator config path is /var/lib/rancher/rke2/agent/etc/containerd/config.toml, docs: https://github.com/rancher/rke2/blob/master/docs/advanced.md#configuring-containerd. |
| containerRuntime.containerd.enable | bool | `false` | Enable containerd support. Inject mirror config into ${containerRuntime.containerd.configPathDir}/config.toml, if config_path is enabled in ${containerRuntime.containerd.configPathDir}/config.toml, the config take effect real time, but if config_path is not enabled in ${containerRuntime.containerd.configPathDir}/config.toml, need restart containerd to take effect. When the version in ${containerRuntime.containerd.configPathDir}/config.toml is "1", inject dfdaemon.config.proxy.registryMirror.url as registry mirror and restart containerd. When the version in ${containerRuntime.containerd.configPathDir}/config.toml is "2": 1. when config_path is enabled in ${containerRuntime.containerd.configPathDir}/config.toml, inject containerRuntime.containerd.registries into config_path, 2. when containerRuntime.containerd.injectConfigPath=true, inject config_path into ${containerRuntime.containerd.configPathDir}/config.toml and inject containerRuntime.containerd.registries into config_path, 3. when not config_path in ${containerRuntime.containerd.configPathDir}/config.toml and containerRuntime.containerd.injectConfigPath=false, inject dfdaemon.config.proxy.registryMirror.url as registry mirror and restart containerd. |
| containerRuntime.containerd.injectConfigPath | bool | `false` | Config path for multiple registries. By default, init container will check ${containerRuntime.containerd.configPathDir}/config.toml, whether is config_path configured, if not, init container will just add the dfdaemon.config.proxy.registryMirror.url for registry mirror. When configPath is true, init container will inject config_path=${containerRuntime.containerd.configPathDir}/certs.d and configure all registries. |
| containerRuntime.containerd.injectRegistryCredencials | object | `{"auth":"","enable":false,"identitytoken":"","password":"","username":""}` | Credencials for authenticating to private registries. By default this is aplicable for single registry mode, for reference see docs: https://github.com/containerd/containerd/blob/v1.6.4/docs/cri/registry.md#configure-registry-credentials. |
| containerRuntime.crio | object | `{"enable":false,"registries":["https://ghcr.io","https://quay.io","https://harbor.example.com:8443"]}` | [Experimental] CRI-O support. |
| containerRuntime.crio.enable | bool | `false` | Enable CRI-O support. Inject drop-in mirror config into /etc/containers/registries.conf.d. |
| containerRuntime.docker | object | `{"caCert":{"commonName":"Dragonfly Authority CA","countryName":"CN","localityName":"Hangzhou","organizationName":"Dragonfly","stateOrProvinceName":"Hangzhou"},"enable":false,"injectHosts":true,"insecure":false,"registryDomains":["ghcr.io","quay.io"],"registryPorts":[443],"restart":false,"skipHosts":["127.0.0.1","docker.io"]}` | [Experimental] Support docker, when use docker-shim in Kubernetes, please set containerRuntime.docker.enable to true. For supporting docker, we need generate CA and update certs, then hijack registries traffic, By default, it's unnecessary to restart docker daemon when pull image from private registries, this feature is support explicit registries in containerRuntime.registry.domains, default domains is ghcr.io, quay.io, please update your registries by `--set containerRuntime.registry.domains='{harbor.example.com,harbor.example.net}' --set containerRuntime.registry.injectHosts=true --set containerRuntime.docker.enable=true`. Caution: **We did not recommend to using dragonfly with docker in Kubernetes** due to many reasons: 1. no fallback image pulling policy. 2. deprecated in Kubernetes. Because the original `daemonset` in Kubernetes did not support `Surging Rolling Update` policy. When kill current dfdaemon pod, the new pod image can not be pulled anymore. If you can not change runtime from docker to others, remind to choose a plan when upgrade dfdaemon: Option 1: pull newly dfdaemon image manually before upgrade dragonfly, or use [ImagePullJob](https://openkruise.io/docs/user-manuals/imagepulljob) to pull image automate. Option 2: keep the image registry of dragonfly is different from common registries and add host in `containerRuntime.docker.skipHosts`. Caution: docker hub image is not supported without restart docker daemon. When need pull image from docker hub or any other registries not in containerRuntime.registry.domains, set containerRuntime.docker.restart=true this feature will inject proxy config into docker.service and restart docker daemon. Caution: set restart to true only when live restore is enable. Requirement: Docker Engine v1.2.0+ without Rootless. |
| containerRuntime.docker.caCert | object | `{"commonName":"Dragonfly Authority CA","countryName":"CN","localityName":"Hangzhou","organizationName":"Dragonfly","stateOrProvinceName":"Hangzhou"}` | CA cert info for generating. |
| containerRuntime.docker.enable | bool | `false` | Enable docker support. Inject ca cert into /etc/docker/certs.d/, Refer: https://docs.docker.com/engine/security/certificates/. |
| containerRuntime.docker.injectHosts | bool | `true` | Inject domains into /etc/hosts to force redirect traffic to dfdaemon. Caution: This feature need dfdaemon to implement SNI Proxy, confirm image tag is greater than or equal to v2.0.0. When use certs and inject hosts in docker, no necessary to restart docker daemon. |
| containerRuntime.docker.insecure | bool | `false` | Skip verify remote tls cert in dfdaemon. If registry cert is private or self-signed, set to true. Caution: this option is test only. When deploy in production, should not skip verify tls cert. |
| containerRuntime.docker.registryDomains | list | `["ghcr.io","quay.io"]` | Registry domains. By default, docker pull image via https, currently, by default 443 port with https. If not standard port, update registryPorts. |
|
||||
| containerRuntime.docker.registryPorts | list | `[443]` | Registry ports. |
|
||||
| containerRuntime.docker.restart | bool | `false` | Restart the docker daemon to redirect traffic to dfdaemon. When containerRuntime.docker.restart=true, containerRuntime.docker.injectHosts and containerRuntime.registry.domains are ignored. If you do not want to restart the docker daemon, keep containerRuntime.docker.restart=false and containerRuntime.docker.injectHosts=true. |
|
||||
| containerRuntime.docker.skipHosts | list | `["127.0.0.1","docker.io"]` | Skip hosts. Some traffic should not be redirected to dragonfly, such as 127.0.0.1 and the image registries of dragonfly itself. The format is like NO_PROXY in golang, refer: https://github.com/golang/net/blob/release-branch.go1.15/http/httpproxy/proxy.go#L39. Caution: some registries use s3 or oss as backend storage; when adding registries to skipHosts, don't forget to add the corresponding backend storage. |
|
||||
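For example, a minimal sketch of enabling docker support for two private registries, using only the `containerRuntime.docker.*` keys from this table. The release name, chart reference `dragonfly/dragonfly`, namespace, and the `harbor.example.*` hostnames are placeholders, not values from this chart:

```bash
# Sketch: enable docker support and hijack two example private registries.
helm upgrade --install dragonfly dragonfly/dragonfly \
  --namespace dragonfly-system \
  --set containerRuntime.docker.enable=true \
  --set containerRuntime.docker.injectHosts=true \
  --set 'containerRuntime.docker.registryDomains={harbor.example.com,harbor.example.net}'
```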
| containerRuntime.extraInitContainers | list | `[]` | Additional init containers. |
|
||||
| containerRuntime.initContainer | object | `{"image":{"digest":"","pullPolicy":"IfNotPresent","registry":"docker.io","repository":"dragonflyoss/openssl","tag":"latest"}}` | The image of the init container; it must include openssl for CA generation. |
|
||||
| containerRuntime.initContainer.image.digest | string | `""` | Image digest. |
|
||||
| containerRuntime.initContainer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| containerRuntime.initContainer.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| containerRuntime.initContainer.image.repository | string | `"dragonflyoss/openssl"` | Image repository. |
|
||||
| containerRuntime.initContainer.image.tag | string | `"latest"` | Image tag. |
|
||||
| dfdaemon.config.aliveTime | string | `"0s"` | Daemon alive time. When set to 0s, the daemon will not exit automatically, which is useful for long-running deployments. |
|
||||
| dfdaemon.config.announcer.schedulerInterval | string | `"30s"` | schedulerInterval is the interval of announcing scheduler. Announcer will provide the scheduler with peer information for scheduling. Peer information includes cpu, memory, etc. |
|
||||
| dfdaemon.config.cacheDir | string | `""` | Dynconfig cache directory. |
|
||||
| dfdaemon.config.console | bool | `false` | Console shows log on console. |
|
||||
| dfdaemon.config.dataDir | string | `"/var/lib/dragonfly"` | Daemon data storage directory. |
|
||||
| dfdaemon.config.download.calculateDigest | bool | `false` | Calculate digest. When only pulling images, this can be set to false to save cpu and memory. |
|
||||
| dfdaemon.config.download.downloadGRPC.security | object | `{"insecure":true,"tlsVerify":true}` | Download grpc security option. |
|
||||
| dfdaemon.config.download.downloadGRPC.unixListen | object | `{"socket":""}` | Download service listen address. Currently, only unix domain socket is supported. |
|
||||
| dfdaemon.config.download.peerGRPC.security | object | `{"insecure":true}` | Peer grpc security option. |
|
||||
| dfdaemon.config.download.peerGRPC.tcpListen.port | int | `65000` | Listen port. |
|
||||
| dfdaemon.config.download.perPeerRateLimit | string | `"512Mi"` | Per-peer task download rate limit per second (in bytes). |
|
||||
| dfdaemon.config.download.prefetch | bool | `false` | When a request carries a range header, prefetch the data outside the range as well. |
|
||||
| dfdaemon.config.download.totalRateLimit | string | `"1024Mi"` | Total download rate limit per second (in bytes). |
|
||||
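A quick sketch of tuning these two download limits together; the release name, chart reference, and namespace are assumptions, and the values are purely illustrative:

```bash
# Sketch: cap per-peer and total download bandwidth of dfdaemon.
helm upgrade --install dragonfly dragonfly/dragonfly \
  --namespace dragonfly-system \
  --set dfdaemon.config.download.perPeerRateLimit=256Mi \
  --set dfdaemon.config.download.totalRateLimit=512Mi
```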
| dfdaemon.config.gcInterval | string | `"1m0s"` | Daemon gc task running interval. |
|
||||
| dfdaemon.config.health.path | string | `"/server/ping"` | |
|
||||
| dfdaemon.config.health.tcpListen.port | int | `40901` | |
|
||||
| dfdaemon.config.host.idc | string | `""` | IDC deployed by daemon. |
|
||||
| dfdaemon.config.host.location | string | `""` | Geographical location, separated by "|" characters. |
|
||||
| dfdaemon.config.jaeger | string | `""` | |
|
||||
| dfdaemon.config.keepStorage | bool | `false` | Whether to keep peer task data when the daemon exits. This is useful when upgrading the daemon service, as all local cache will be preserved. Default is false. |
|
||||
| dfdaemon.config.logDir | string | `""` | Log directory. |
|
||||
| dfdaemon.config.network.enableIPv6 | bool | `false` | enableIPv6 enables ipv6. |
|
||||
| dfdaemon.config.networkTopology.enable | bool | `false` | Enable networkTopology service. |
|
||||
| dfdaemon.config.networkTopology.probe.interval | string | `"20m"` | interval is the interval of probing hosts. |
|
||||
| dfdaemon.config.objectStorage.enable | bool | `false` | Enable object storage service. |
|
||||
| dfdaemon.config.objectStorage.filter | string | `"Expires&Signature&ns"` | Filter is used to generate a unique Task ID by filtering unnecessary query params in the URL; entries are separated by the & character. With filter: "Expires&Signature&ns", for example, http://localhost/xyz?Expires=111&Signature=222&ns=docker.io and http://localhost/xyz?Expires=333&Signature=999&ns=docker.io are the same task. |
|
||||
| dfdaemon.config.objectStorage.maxReplicas | int | `3` | MaxReplicas is the maximum number of replicas of an object cache in seed peers. |
|
||||
| dfdaemon.config.objectStorage.security | object | `{"insecure":true,"tlsVerify":true}` | Object storage service security option. |
|
||||
| dfdaemon.config.objectStorage.tcpListen.port | int | `65004` | Listen port. |
|
||||
| dfdaemon.config.pluginDir | string | `""` | Plugin directory. |
|
||||
| dfdaemon.config.pprofPort | int | `-1` | Listen port for pprof, only valid when the verbose option is true. default is -1. If it is 0, pprof will use a random port. |
|
||||
| dfdaemon.config.proxy.defaultFilter | string | `"Expires&Signature&ns"` | Filter for hashing the url. With defaultFilter: "Expires&Signature&ns", for example, http://localhost/xyz?Expires=111&Signature=222&ns=docker.io and http://localhost/xyz?Expires=333&Signature=999&ns=docker.io are the same task. It is also possible to override the default filter by adding the X-Dragonfly-Filter header through the proxy. |
|
||||
| dfdaemon.config.proxy.defaultTag | string | `""` | Tag for the task. When the value of the default tag differs, the same download url is divided into different tasks according to the tag. It is also possible to override the default tag by adding the X-Dragonfly-Tag header through the proxy (see the example below). |
|
||||
| dfdaemon.config.proxy.proxies[0] | object | `{"regx":"blobs/sha256.*"}` | Proxy all http image layer download requests with dfget. |
|
||||
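As an illustration of the `X-Dragonfly-Filter` and `X-Dragonfly-Tag` headers mentioned above, a sketch of a request sent through the dfdaemon proxy. The proxy address assumes the default 65001 port from this chart; the url and tag value are placeholders:

```bash
# Sketch: override the default filter and tag per request via the dfdaemon proxy.
curl -x 127.0.0.1:65001 \
  -H 'X-Dragonfly-Filter: Expires&Signature&ns' \
  -H 'X-Dragonfly-Tag: my-app' \
  -o data.bin \
  'http://example.com/data.bin?Expires=111&Signature=222&ns=docker.io'
```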
| dfdaemon.config.proxy.registryMirror.dynamic | bool | `true` | When enabled, use value of "X-Dragonfly-Registry" in http header for remote instead of url host. |
|
||||
| dfdaemon.config.proxy.registryMirror.insecure | bool | `false` | Set to true to skip verifying the TLS cert of the registry mirror url above, for example when it is self-signed. |
|
||||
| dfdaemon.config.proxy.registryMirror.url | string | `"https://index.docker.io"` | URL for the registry mirror. |
|
||||
| dfdaemon.config.proxy.security | object | `{"insecure":true,"tlsVerify":false}` | Proxy security option. |
|
||||
| dfdaemon.config.proxy.tcpListen.namespace | string | `"/run/dragonfly/net"` | Namespace is the linux net namespace, like /proc/1/ns/net. It is useful for running the daemon in a pod with its own allocated IP while listening on the special port in the host net namespace. Linux only. |
|
||||
| dfdaemon.config.scheduler | object | `{"disableAutoBackSource":false,"manager":{"enable":true,"netAddrs":null,"refreshInterval":"10m","seedPeer":{"clusterID":1,"enable":false,"type":"super"}},"netAddrs":null,"scheduleTimeout":"30s"}` | Scheduler config, netAddrs is auto-configured in templates/dfdaemon/dfdaemon-configmap.yaml. |
|
||||
| dfdaemon.config.scheduler.disableAutoBackSource | bool | `false` | Disable auto back source in dfdaemon. |
|
||||
| dfdaemon.config.scheduler.manager.enable | bool | `true` | Get scheduler list dynamically from manager. |
|
||||
| dfdaemon.config.scheduler.manager.netAddrs | string | `nil` | Manager service address, netAddr is a list, there are two fields type and addr. |
|
||||
| dfdaemon.config.scheduler.manager.refreshInterval | string | `"10m"` | Scheduler list refresh interval. |
|
||||
| dfdaemon.config.scheduler.manager.seedPeer.clusterID | int | `1` | Associated seed peer cluster id. |
|
||||
| dfdaemon.config.scheduler.manager.seedPeer.enable | bool | `false` | Enable seed peer mode. |
|
||||
| dfdaemon.config.scheduler.manager.seedPeer.type | string | `"super"` | Seed peer supports "super", "strong" and "weak" types. |
|
||||
| dfdaemon.config.scheduler.netAddrs | string | `nil` | Scheduler service addresses; netAddrs is a list whose entries have two fields, type and addr. Also set dfdaemon.config.scheduler.manager.enable to false for this to take effect (see the sketch below). |
|
||||
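A minimal sketch of pointing dfdaemon at a fixed scheduler list instead of discovering it from the manager. The scheduler address and the `type: tcp` value are assumptions based on the two-field (type, addr) format described above:

```bash
# Sketch: static scheduler addresses for dfdaemon (manager discovery disabled).
cat > scheduler-addrs.yaml <<'EOF'
dfdaemon:
  config:
    scheduler:
      manager:
        enable: false
      netAddrs:
        - type: tcp
          addr: dragonfly-scheduler.dragonfly-system.svc:8002
EOF
helm upgrade --install dragonfly dragonfly/dragonfly \
  --namespace dragonfly-system -f scheduler-addrs.yaml
```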
| dfdaemon.config.scheduler.scheduleTimeout | string | `"30s"` | Schedule timeout. |
|
||||
| dfdaemon.config.security.autoIssueCert | bool | `false` | AutoIssueCert indicates to issue client certificates for all grpc call. If AutoIssueCert is false, any other option in Security will be ignored. |
|
||||
| dfdaemon.config.security.caCert | string | `""` | CACert is the root CA certificate for all grpc tls handshake, it can be path or PEM format string. |
|
||||
| dfdaemon.config.security.certSpec.dnsNames | string | `nil` | DNSNames is a list of dns names be set on the certificate. |
|
||||
| dfdaemon.config.security.certSpec.ipAddresses | string | `nil` | IPAddresses is a list of ip addresses be set on the certificate. |
|
||||
| dfdaemon.config.security.certSpec.validityPeriod | string | `"4320h"` | ValidityPeriod is the validity period of certificate. |
|
||||
| dfdaemon.config.security.tlsPolicy | string | `"prefer"` | TLSPolicy controls the grpc handshake behaviors: force: both ClientHandshake and ServerHandshake only support tls. prefer: ServerHandshake supports tls and insecure (non-tls), ClientHandshake only supports tls. default: ServerHandshake supports tls and insecure (non-tls), ClientHandshake only supports insecure (non-tls). Notice: if the dragonfly service has already been deployed, a two-step upgrade is required. The first step is to set tlsPolicy to default and upgrade the dragonfly services. The second step is to set tlsPolicy to prefer and then completely upgrade the dragonfly services (see the sketch below). |
|
||||
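A sketch of the two-step tlsPolicy upgrade described above; the release name, chart reference, and namespace are assumptions:

```bash
# Step 1: roll out "default" so servers accept both tls and insecure clients.
helm upgrade dragonfly dragonfly/dragonfly --namespace dragonfly-system \
  --set dfdaemon.config.security.tlsPolicy=default \
  --set scheduler.config.security.tlsPolicy=default \
  --set manager.config.security.tlsPolicy=default
# Step 2: after all components have finished rolling out, switch to "prefer".
helm upgrade dragonfly dragonfly/dragonfly --namespace dragonfly-system \
  --set dfdaemon.config.security.tlsPolicy=prefer \
  --set scheduler.config.security.tlsPolicy=prefer \
  --set manager.config.security.tlsPolicy=prefer
```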
| dfdaemon.config.security.tlsVerify | bool | `false` | TLSVerify indicates to verify certificates. |
|
||||
| dfdaemon.config.storage.diskGCThreshold | string | `"50Gi"` | Disk GC threshold. When disk usage is above 50Gi, start to gc the oldest tasks. |
|
||||
| dfdaemon.config.storage.multiplex | bool | `true` | Set to true to reuse the underlying storage for the same task id. |
|
||||
| dfdaemon.config.storage.strategy | string | `"io.d7y.storage.v2.simple"` | Storage strategy when processing task data. io.d7y.storage.v2.simple: download the file to the data directory first, then copy it to the output path; this is the default action, and the downloaded file in the data directory is the peer data for uploading to other peers. io.d7y.storage.v2.advance: download the file directly to the output path with a postfix and hard link it to the final output, avoiding the copy to the output path; faster than the simple strategy, but the output file with the postfix is the peer data for uploading to other peers, and when a user deletes or changes this file, the peer data is corrupted. Default is io.d7y.storage.v2.advance. |
|
||||
| dfdaemon.config.storage.taskExpireTime | string | `"6h"` | Task data expire time. When a task's data has not been accessed for this duration, the task will be garbage collected. |
|
||||
| dfdaemon.config.upload.rateLimit | string | `"1024Mi"` | Upload rate limit per second (in bytes). |
|
||||
| dfdaemon.config.upload.security | object | `{"insecure":true,"tlsVerify":false}` | Upload grpc security option. |
|
||||
| dfdaemon.config.upload.tcpListen.port | int | `65002` | Listen port. |
|
||||
| dfdaemon.config.verbose | bool | `false` | Whether to enable debug level logger and enable pprof. |
|
||||
| dfdaemon.config.workHome | string | `""` | Work directory. |
|
||||
| dfdaemon.containerPort | int | `65001` | Pod containerPort. |
|
||||
| dfdaemon.daemonsetAnnotations | object | `{}` | Daemonset annotations. |
|
||||
| dfdaemon.enable | bool | `true` | Enable dfdaemon. |
|
||||
| dfdaemon.extraVolumeMounts | list | `[{"mountPath":"/var/log/dragonfly/daemon","name":"logs"}]` | Extra volumeMounts for dfdaemon. |
|
||||
| dfdaemon.extraVolumes | list | `[{"emptyDir":{},"name":"logs"}]` | Extra volumes for dfdaemon. |
|
||||
| dfdaemon.fullnameOverride | string | `""` | Override dfdaemon fullname. |
|
||||
| dfdaemon.hostAliases | list | `[]` | Host Aliases. |
|
||||
| dfdaemon.hostNetwork | bool | `false` | Whether to use hostNetwork; a pod on the host network can communicate with normal pods on the cni network. |
|
||||
| dfdaemon.hostPort | int | `65001` | Used when .hostNetwork == false and .config.proxy.tcpListen.namespace is empty. Many network add-ons do not yet support hostPort, see https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/#hostport-services-do-not-work. By default, dfdaemon injects the 65001 port into the host network by sharing the host network namespace. If you want to use hostPort, set .config.proxy.tcpListen.namespace to empty and keep .hostNetwork == false (see the sketch below). For performance, injecting the 65001 port into the host network is better than hostPort. |
|
||||
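A sketch of the hostPort setup described above, keeping `hostNetwork` off and clearing the proxy listen namespace; the release name, chart reference, and namespace are assumptions:

```bash
# Sketch: expose dfdaemon's proxy via hostPort instead of sharing the host
# network namespace.
helm upgrade --install dragonfly dragonfly/dragonfly \
  --namespace dragonfly-system \
  --set dfdaemon.hostNetwork=false \
  --set dfdaemon.hostPort=65001 \
  --set dfdaemon.config.proxy.tcpListen.namespace=""
```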
| dfdaemon.image.digest | string | `""` | Image digest. |
|
||||
| dfdaemon.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| dfdaemon.image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
|
||||
| dfdaemon.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| dfdaemon.image.repository | string | `"dragonflyoss/dfdaemon"` | Image repository. |
|
||||
| dfdaemon.image.tag | string | `"v2.1.44"` | Image tag. |
|
||||
| dfdaemon.initContainer.image.digest | string | `""` | Image digest. |
|
||||
| dfdaemon.initContainer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| dfdaemon.initContainer.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| dfdaemon.initContainer.image.repository | string | `"busybox"` | Image repository. |
|
||||
| dfdaemon.initContainer.image.tag | string | `"latest"` | Image tag. |
|
||||
| dfdaemon.maxProcs | string | `""` | maxProcs limits the number of operating system threads that can execute user-level Go code simultaneously by setting the GOMAXPROCS environment variable, refer to https://golang.org/pkg/runtime. |
|
||||
| dfdaemon.metrics.enable | bool | `false` | Enable peer metrics. |
|
||||
| dfdaemon.metrics.podMonitor.additionalLabels | object | `{}` | Additional labels. |
|
||||
| dfdaemon.metrics.podMonitor.enable | bool | `false` | Enable prometheus pod monitor. ref: https://github.com/coreos/prometheus-operator. |
|
||||
| dfdaemon.metrics.podMonitor.interval | string | `"30s"` | Interval at which metrics should be scraped. |
|
||||
| dfdaemon.metrics.podMonitor.scrapeTimeout | string | `"10s"` | Timeout after which the scrape is ended. |
|
||||
| dfdaemon.metrics.prometheusRule.additionalLabels | object | `{}` | Additional labels. |
|
||||
| dfdaemon.metrics.prometheusRule.enable | bool | `false` | Enable prometheus rule. ref: https://github.com/coreos/prometheus-operator. |
|
||||
| dfdaemon.metrics.prometheusRule.rules | list | `[{"alert":"PeerDown","annotations":{"message":"Peer instance {{ \"{{ $labels.instance }}\" }} is down","summary":"Peer instance is down"},"expr":"sum(dragonfly_dfdaemon_version{}) == 0","for":"5m","labels":{"severity":"critical"}},{"alert":"PeerHighNumberOfFailedDownloadTask","annotations":{"message":"Peer has a high number of failed download task","summary":"Peer has a high number of failed download task"},"expr":"sum(irate(dragonfly_dfdaemon_peer_task_failed_total{}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"PeerSuccessRateOfDownloadingTask","annotations":{"message":"Peer's success rate of downloading task is low","summary":"Peer's success rate of downloading task is low"},"expr":"(sum(rate(dragonfly_dfdaemon_peer_task_total{container=\"seed-peer\"}[1m])) - sum(rate(dragonfly_dfdaemon_peer_task_failed_total{container=\"seed-peer\"}[1m]))) / sum(rate(dragonfly_dfdaemon_peer_task_total{container=\"seed-peer\"}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}},{"alert":"PeerHighNumberOfFailedGRPCRequest","annotations":{"message":"Peer has a high number of failed grpc request","summary":"Peer has a high number of failed grpc request"},"expr":"sum(rate(grpc_server_started_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\"}[1m])) - sum(rate(grpc_server_handled_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\",grpc_code=\"OK\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\",grpc_code=\"NotFound\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\",grpc_code=\"PermissionDenied\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\",grpc_code=\"InvalidArgument\"}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"PeerSuccessRateOfGRPCRequest","annotations":{"message":"Peer's success rate of grpc request is low","summary":"Peer's success rate of grpc request is low"},"expr":"(sum(rate(grpc_server_handled_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\",grpc_code=\"OK\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\",grpc_code=\"NotFound\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\",grpc_code=\"PermissionDenied\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\",grpc_code=\"InvalidArgument\"}[1m]))) / sum(rate(grpc_server_started_total{grpc_service=\"dfdaemon.Daemon\",grpc_type=\"unary\"}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}}]` | Prometheus rules. |
|
||||
| dfdaemon.metrics.service.annotations | object | `{}` | Service annotations. |
|
||||
| dfdaemon.metrics.service.labels | object | `{}` | Service labels. |
|
||||
| dfdaemon.metrics.service.type | string | `"ClusterIP"` | Service type. |
|
||||
| dfdaemon.mountDataDirAsHostPath | bool | `false` | Mount the data directory from the host. When enabled, a host path is mounted into dfdaemon; otherwise an emptyDir is used. |
|
||||
| dfdaemon.name | string | `"dfdaemon"` | Dfdaemon name. |
|
||||
| dfdaemon.nameOverride | string | `""` | Override dfdaemon name. |
|
||||
| dfdaemon.nodeSelector | object | `{}` | Node labels for pod assignment. |
|
||||
| dfdaemon.podAnnotations | object | `{}` | Pod annotations. |
|
||||
| dfdaemon.podLabels | object | `{}` | Pod labels. |
|
||||
| dfdaemon.priorityClassName | string | `""` | Pod priorityClassName. |
|
||||
| dfdaemon.resources | object | `{"limits":{"cpu":"2","memory":"2Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| dfdaemon.terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds. |
|
||||
| dfdaemon.tolerations | list | `[]` | List of node taints to tolerate. |
|
||||
| dfdaemon.updateStrategy | object | `{}` | Update strategy for replicas. |
|
||||
| externalManager.grpcPort | int | `65003` | External GRPC service port. |
|
||||
| externalManager.host | string | `nil` | External manager hostname. |
|
||||
| externalManager.restPort | int | `8080` | External REST service port. |
|
||||
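A sketch of pointing the chart at a manager running elsewhere; the hostname is a placeholder and the ports match the defaults in this table. Pairing this with `manager.enable=false` to disable the bundled manager is an assumption about typical usage:

```bash
# Sketch: use an externally deployed manager instead of the bundled one.
helm upgrade --install dragonfly dragonfly/dragonfly \
  --namespace dragonfly-system \
  --set manager.enable=false \
  --set externalManager.host=manager.example.com \
  --set externalManager.grpcPort=65003 \
  --set externalManager.restPort=8080
```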
|
@ -364,59 +239,46 @@ helm delete dragonfly --namespace dragonfly-system
|
|||
| externalRedis.brokerDB | int | `1` | External redis broker db. |
|
||||
| externalRedis.db | int | `0` | External redis db. |
|
||||
| externalRedis.masterName | string | `""` | External redis sentinel master name. |
|
||||
| externalRedis.networkTopologyDB | int | `3` | External redis networkTopology db. |
|
||||
| externalRedis.password | string | `""` | External redis password. |
|
||||
| externalRedis.sentinelPassword | string | `""` | External redis sentinel password. |
|
||||
| externalRedis.sentinelUsername | string | `""` | External redis sentinel username. |
|
||||
| externalRedis.username | string | `""` | External redis username. |
|
||||
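A sketch of switching to an external Redis with sentinel; hostnames, usernames, and passwords are placeholders. Note that the Redis address field itself is not shown in this excerpt of the table and would also need to be set:

```bash
# Sketch: disable the bundled redis and point dragonfly at an external one.
helm upgrade --install dragonfly dragonfly/dragonfly \
  --namespace dragonfly-system \
  --set redis.enable=false \
  --set externalRedis.masterName=mymaster \
  --set externalRedis.sentinelUsername=sentinel-user \
  --set externalRedis.sentinelPassword=changeme \
  --set externalRedis.username=default \
  --set externalRedis.password=changeme
```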
| fullnameOverride | string | `""` | Override dragonfly fullname. |
|
||||
| global.imagePullSecrets | list | `[]` | Global Docker registry secret names as an array. |
|
||||
| global.imageRegistry | string | `""` | Global Docker image registry. |
|
||||
| global.nodeSelector | object | `{}` | Global node labels for pod assignment. |
|
||||
| global.storageClass | string | `""` | Global storageClass for Persistent Volume(s). |
|
||||
| jaeger.agent.enabled | bool | `false` | |
|
||||
| jaeger.allInOne.enabled | bool | `true` | |
|
||||
| jaeger.collector.enabled | bool | `false` | |
|
||||
| jaeger.enable | bool | `false` | Enable an all-in-one jaeger for tracing every download event. It should not be used in a production environment. |
|
||||
| jaeger.provisionDataStore.cassandra | bool | `false` | |
|
||||
| jaeger.query.enabled | bool | `false` | |
|
||||
| jaeger.storage.type | string | `"none"` | |
|
||||
| manager.config.auth.jwt.key | string | `"ZHJhZ29uZmx5Cg=="` | Key is the secret key used for signing; the default value is the base64 encoding of "dragonfly". Please change the key in production (see the sketch below). |
|
||||
| manager.config.auth.jwt.maxRefresh | string | `"48h"` | MaxRefresh field allows clients to refresh their token until MaxRefresh has passed, default duration is two days. |
|
||||
| manager.config.auth.jwt.realm | string | `"Dragonfly"` | Realm name to display to the user, default value is Dragonfly. |
|
||||
| manager.config.auth.jwt.timeout | string | `"48h"` | Timeout is duration that a jwt token is valid, default duration is two days. |
|
||||
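A sketch of replacing the default JWT signing key before going to production; the key is generated randomly here, and the release name, chart reference, and namespace are assumptions:

```bash
# Sketch: generate a random signing key and pass it to the manager.
JWT_KEY=$(openssl rand -base64 32)
helm upgrade --install dragonfly dragonfly/dragonfly \
  --namespace dragonfly-system \
  --set manager.config.auth.jwt.key="$JWT_KEY"
```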
| manager.config.cache.local.size | int | `200000` | Size of LFU cache. |
|
||||
| manager.config.cache.local.size | int | `30000` | Size of LFU cache. |
|
||||
| manager.config.cache.local.ttl | string | `"3m"` | Local cache TTL duration. |
|
||||
| manager.config.cache.redis.ttl | string | `"5m"` | Redis cache TTL duration. |
|
||||
| manager.config.console | bool | `false` | Console shows log on console. |
|
||||
| manager.config.jaeger | string | `""` | |
|
||||
| manager.config.job.preheat | object | `{"registryTimeout":"1m"}` | Preheat configuration. |
|
||||
| manager.config.console | bool | `true` | Console shows log on console. |
|
||||
| manager.config.job.gc | object | `{"interval":"3h","ttl":"6h"}` | gc configuration. |
|
||||
| manager.config.job.gc.interval | string | `"3h"` | interval is the interval of gc. |
|
||||
| manager.config.job.gc.ttl | string | `"6h"` | ttl is the ttl of job. |
|
||||
| manager.config.job.preheat | object | `{"registryTimeout":"1m","tls":{"insecureSkipVerify":false}}` | Preheat configuration. |
|
||||
| manager.config.job.preheat.registryTimeout | string | `"1m"` | registryTimeout is the timeout for requesting registry to get token and manifest. |
|
||||
| manager.config.job.preheat.tls.insecureSkipVerify | bool | `false` | insecureSkipVerify controls whether a client verifies the server's certificate chain and hostname. |
|
||||
| manager.config.job.rateLimit | object | `{"capacity":10,"fillInterval":"1m","quantum":10}` | rateLimit configuration. |
|
||||
| manager.config.job.rateLimit.capacity | int | `10` | capacity is the maximum number of requests that can be consumed in a single fillInterval. |
|
||||
| manager.config.job.rateLimit.fillInterval | string | `"1m"` | fillInterval is the interval for refilling the bucket. |
|
||||
| manager.config.job.rateLimit.quantum | int | `10` | quantum is the number of tokens taken from the bucket for each request. |
|
||||
| manager.config.job.syncPeers | object | `{"interval":"24h","timeout":"10m"}` | Sync peers configuration. |
|
||||
| manager.config.job.syncPeers.interval | string | `"24h"` | interval is the interval for syncing all peers information from the scheduler and display peers information in the manager console. |
|
||||
| manager.config.job.syncPeers.timeout | string | `"10m"` | timeout is the timeout for syncing peers information from the single scheduler. |
|
||||
| manager.config.network.enableIPv6 | bool | `false` | enableIPv6 enables ipv6. |
|
||||
| manager.config.objectStorage.accessKey | string | `""` | AccessKey is access key ID. |
|
||||
| manager.config.objectStorage.enable | bool | `false` | Enable object storage. |
|
||||
| manager.config.objectStorage.endpoint | string | `""` | Endpoint is datacenter endpoint. |
|
||||
| manager.config.objectStorage.name | string | `"s3"` | Name is object storage name of type, it can be s3 or oss. |
|
||||
| manager.config.objectStorage.region | string | `""` | Region is the storage region. |
|
||||
| manager.config.objectStorage.s3ForcePathStyle | bool | `true` | S3ForcePathStyle sets force path style for s3, true by default. Set this to `true` to force the request to use path-style addressing, i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will use virtual hosted bucket addressing when possible (`http://BUCKET.s3.amazonaws.com/KEY`). Refer to https://github.com/aws/aws-sdk-go/blob/main/aws/config.go#L118. |
|
||||
| manager.config.objectStorage.secretKey | string | `""` | SecretKey is access key secret. |
|
||||
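A sketch of enabling the manager's object storage integration against an S3-compatible backend, using only the `manager.config.objectStorage.*` keys listed above; the endpoint, region, and credentials are placeholders:

```bash
# Sketch: enable object storage on the manager (S3-compatible backend).
cat > object-storage.yaml <<'EOF'
manager:
  config:
    objectStorage:
      enable: true
      name: s3
      region: us-east-1
      endpoint: s3.example.com
      accessKey: AKIAEXAMPLE
      secretKey: changeme
      s3ForcePathStyle: true
EOF
helm upgrade --install dragonfly dragonfly/dragonfly \
  --namespace dragonfly-system -f object-storage.yaml
```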
| manager.config.pprofPort | int | `-1` | Listen port for pprof, only valid when the verbose option is true. Default is -1. If it is 0, pprof will use a random port. |
|
||||
| manager.config.security.autoIssueCert | bool | `false` | AutoIssueCert indicates to issue client certificates for all grpc call. If AutoIssueCert is false, any other option in Security will be ignored. |
|
||||
| manager.config.security.caCert | string | `""` | CACert is the CA certificate for all grpc tls handshake, it can be path or PEM format string. |
|
||||
| manager.config.security.caKey | string | `""` | CAKey is the CA private key, it can be path or PEM format string. |
|
||||
| manager.config.security.certSpec.dnsNames | list | `["dragonfly-manager","dragonfly-manager.dragonfly-system.svc","dragonfly-manager.dragonfly-system.svc.cluster.local"]` | DNSNames is a list of dns names be set on the certificate. |
|
||||
| manager.config.security.certSpec.ipAddresses | string | `nil` | IPAddresses is a list of ip addresses be set on the certificate. |
|
||||
| manager.config.security.certSpec.validityPeriod | string | `"87600h"` | ValidityPeriod is the validity period of certificate. |
|
||||
| manager.config.security.tlsPolicy | string | `"prefer"` | TLSPolicy controls the grpc handshake behaviors: force: both ClientHandshake and ServerHandshake only support tls. prefer: ServerHandshake supports tls and insecure (non-tls), ClientHandshake only supports tls. default: ServerHandshake supports tls and insecure (non-tls), ClientHandshake only supports insecure (non-tls). Notice: if the dragonfly service has already been deployed, a two-step upgrade is required. The first step is to set tlsPolicy to default and upgrade the dragonfly services. The second step is to set tlsPolicy to prefer and then completely upgrade the dragonfly services. |
|
||||
| manager.config.pprofPort | int | `-1` | Listen port for pprof, default is -1 (meaning disabled). |
|
||||
| manager.config.server.cacheDir | string | `""` | Dynconfig cache directory. |
|
||||
| manager.config.server.grpc.advertiseIP | string | `""` | GRPC advertise ip. |
|
||||
| manager.config.server.logDir | string | `""` | Log directory. |
|
||||
| manager.config.server.logLevel | string | `"info"` | logLevel specifies the logging level for the manager. Default: "info" Supported values: "debug", "info", "warn", "error", "panic", "fatal" |
|
||||
| manager.config.server.pluginDir | string | `""` | Plugin directory. |
|
||||
| manager.config.server.rest.tls.cert | string | `""` | Certificate file path. |
|
||||
| manager.config.server.rest.tls.key | string | `""` | Key file path. |
|
||||
| manager.config.server.workHome | string | `""` | Work directory. |
|
||||
| manager.config.verbose | bool | `false` | Whether to enable debug level logger and enable pprof. |
|
||||
| manager.config.tracing.protocol | string | `"grpc"` | Protocol specifies the communication protocol for the tracing server. Supported values: "http", "https", "grpc" (default: None). This determines how tracing logs are transmitted to the server. |
|
||||
| manager.deploymentAnnotations | object | `{}` | Deployment annotations. |
|
||||
| manager.enable | bool | `true` | Enable manager. |
|
||||
| manager.extraVolumeMounts | list | `[{"mountPath":"/var/log/dragonfly/manager","name":"logs"}]` | Extra volumeMounts for manager. |
|
||||
|
@ -424,12 +286,13 @@ helm delete dragonfly --namespace dragonfly-system
|
|||
| manager.fullnameOverride | string | `""` | Override manager fullname. |
|
||||
| manager.grpcPort | int | `65003` | GRPC service port. |
|
||||
| manager.hostAliases | list | `[]` | Host Aliases. |
|
||||
| manager.hostNetwork | bool | `false` | hostNetwork specifies whether host network should be enabled. |
|
||||
| manager.image.digest | string | `""` | Image digest. |
|
||||
| manager.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| manager.image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
|
||||
| manager.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| manager.image.repository | string | `"dragonflyoss/manager"` | Image repository. |
|
||||
| manager.image.tag | string | `"v2.1.44"` | Image tag. |
|
||||
| manager.image.tag | string | `"v2.3.1-rc.2"` | Image tag. |
|
||||
| manager.ingress.annotations | object | `{}` | Ingress annotations. |
|
||||
| manager.ingress.className | string | `""` | Ingress class name. Requirement: kubernetes >=1.18. |
|
||||
| manager.ingress.enable | bool | `false` | Enable ingress. |
|
||||
|
@ -442,8 +305,9 @@ helm delete dragonfly --namespace dragonfly-system
|
|||
| manager.initContainer.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| manager.initContainer.image.repository | string | `"busybox"` | Image repository. |
|
||||
| manager.initContainer.image.tag | string | `"latest"` | Image tag. |
|
||||
| manager.initContainer.resources | object | `{"limits":{"cpu":"2","memory":"4Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| manager.maxProcs | string | `""` | maxProcs limits the number of operating system threads that can execute user-level Go code simultaneously by setting the GOMAXPROCS environment variable, refer to https://golang.org/pkg/runtime. |
|
||||
| manager.metrics.enable | bool | `false` | Enable manager metrics. |
|
||||
| manager.metrics.enable | bool | `true` | Enable manager metrics. |
|
||||
| manager.metrics.prometheusRule.additionalLabels | object | `{}` | Additional labels. |
|
||||
| manager.metrics.prometheusRule.enable | bool | `false` | Enable prometheus rule. ref: https://github.com/coreos/prometheus-operator. |
|
||||
| manager.metrics.prometheusRule.rules | list | `[{"alert":"ManagerDown","annotations":{"message":"Manager instance {{ \"{{ $labels.instance }}\" }} is down","summary":"Manager instance is down"},"expr":"sum(dragonfly_manager_version{}) == 0","for":"5m","labels":{"severity":"critical"}},{"alert":"ManagerHighNumberOfFailedGRPCRequest","annotations":{"message":"Manager has a high number of failed grpc request","summary":"Manager has a high number of failed grpc request"},"expr":"sum(rate(grpc_server_started_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\"}[1m])) - sum(rate(grpc_server_handled_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\",grpc_code=\"OK\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\",grpc_code=\"NotFound\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\",grpc_code=\"PermissionDenied\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\",grpc_code=\"InvalidArgument\"}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"ManagerSuccessRateOfGRPCRequest","annotations":{"message":"Manager's success rate of grpc request is low","summary":"Manager's success rate of grpc request is low"},"expr":"(sum(rate(grpc_server_handled_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\",grpc_code=\"OK\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\",grpc_code=\"NotFound\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\",grpc_code=\"PermissionDenied\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\",grpc_code=\"InvalidArgument\"}[1m]))) / sum(rate(grpc_server_started_total{grpc_service=\"manager.Manager\",grpc_type=\"unary\"}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}},{"alert":"ManagerHighNumberOfFailedRESTRequest","annotations":{"message":"Manager has a high number of failed rest request","summary":"Manager has a high number of failed rest request"},"expr":"sum(rate(dragonfly_manager_requests_total{}[1m])) - sum(rate(dragonfly_manager_requests_total{code=~\"[12]..\"}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"ManagerSuccessRateOfRESTRequest","annotations":{"message":"Manager's success rate of rest request is low","summary":"Manager's success rate of rest request is low"},"expr":"sum(rate(dragonfly_manager_requests_total{code=~\"[12]..\"}[1m])) / sum(rate(dragonfly_manager_requests_total{}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}}]` | Prometheus rules. |
|
||||
|
@ -461,10 +325,11 @@ helm delete dragonfly --namespace dragonfly-system
|
|||
| manager.podLabels | object | `{}` | Pod labels. |
|
||||
| manager.priorityClassName | string | `""` | Pod priorityClassName. |
|
||||
| manager.replicas | int | `3` | Number of Pods to launch. |
|
||||
| manager.resources | object | `{"limits":{"cpu":"2","memory":"4Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| manager.resources | object | `{"limits":{"cpu":"8","memory":"16Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| manager.restPort | int | `8080` | REST service port. |
|
||||
| manager.service.annotations | object | `{}` | Service annotations. |
|
||||
| manager.service.labels | object | `{}` | Service labels. |
|
||||
| manager.service.nodePort | string | `""` | Service nodePort. |
|
||||
| manager.service.type | string | `"ClusterIP"` | Service type. |
|
||||
| manager.terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds. |
|
||||
| manager.tolerations | list | `[]` | List of node taints to tolerate. |
|
||||
|
@ -479,51 +344,31 @@ helm delete dragonfly --namespace dragonfly-system
|
|||
| mysql.migrate | bool | `true` | Running GORM migration. |
|
||||
| mysql.primary.service.port | int | `3306` | Mysql port. |
|
||||
| nameOverride | string | `""` | Override dragonfly name. |
|
||||
| namespaceOverride | string | `""` | Override dragonfly namespace. |
|
||||
| redis.auth.enabled | bool | `true` | Enable password authentication. |
|
||||
| redis.auth.password | string | `"dragonfly"` | Redis password. |
|
||||
| redis.clusterDomain | string | `"cluster.local"` | Cluster domain. |
|
||||
| redis.enable | bool | `true` | Enable redis cluster with docker container. |
|
||||
| redis.master.service.ports.redis | int | `6379` | Redis master service port. |
|
||||
| scheduler.config.console | bool | `false` | Console shows log on console. |
|
||||
| scheduler.config.console | bool | `true` | Console shows log on console. |
|
||||
| scheduler.config.dynconfig.refreshInterval | string | `"1m"` | Dynamic config refresh interval. |
|
||||
| scheduler.config.dynconfig.type | string | `"manager"` | Type is deprecated and is no longer used. Please remove it from your configuration. |
|
||||
| scheduler.config.host.idc | string | `""` | IDC is the idc of scheduler instance. |
|
||||
| scheduler.config.host.location | string | `""` | Location is the location of scheduler instance. |
|
||||
| scheduler.config.jaeger | string | `""` | |
|
||||
| scheduler.config.manager.keepAlive.interval | string | `"5s"` | Manager keepalive interval. |
|
||||
| scheduler.config.manager.schedulerClusterID | int | `1` | Associated scheduler cluster id. |
|
||||
| scheduler.config.network.enableIPv6 | bool | `false` | enableIPv6 enables ipv6. |
|
||||
| scheduler.config.pprofPort | int | `-1` | Listen port for pprof, only valid when the verbose option is true. default is -1. If it is 0, pprof will use a random port. |
|
||||
| scheduler.config.resource | object | `{"task":{"downloadTiny":{"scheme":"http","timeout":"1m","tls":{"insecureSkipVerify":true}}}}` | resource configuration. |
|
||||
| scheduler.config.resource.task | object | `{"downloadTiny":{"scheme":"http","timeout":"1m","tls":{"insecureSkipVerify":true}}}` | task configuration. |
|
||||
| scheduler.config.resource.task.downloadTiny | object | `{"scheme":"http","timeout":"1m","tls":{"insecureSkipVerify":true}}` | downloadTiny is the configuration of downloading tiny task by scheduler. |
|
||||
| scheduler.config.resource.task.downloadTiny.scheme | string | `"http"` | scheme is download tiny task scheme. |
|
||||
| scheduler.config.resource.task.downloadTiny.timeout | string | `"1m"` | timeout is http request timeout. |
|
||||
| scheduler.config.resource.task.downloadTiny.tls | object | `{"insecureSkipVerify":true}` | tls is download tiny task TLS configuration. |
|
||||
| scheduler.config.resource.task.downloadTiny.tls.insecureSkipVerify | bool | `true` | insecureSkipVerify controls whether a client verifies the server's certificate chain and hostname. |
|
||||
| scheduler.config.scheduler.algorithm | string | `"default"` | Algorithm configuration to use different scheduling algorithms, default configuration supports "default", "ml" and "nt". "default" is the rule-based scheduling algorithm, "ml" is the machine learning scheduling algorithm, "nt" is the rule-based and networkTopology-based scheduling algorithm. It also supports user plugin extension, the algorithm value is "plugin", and the compiled `d7y-scheduler-plugin-evaluator.so` file is added to the dragonfly working directory plugins. |
|
||||
| scheduler.config.pprofPort | int | `-1` | Listen port for pprof, default is -1 (meaning disabled). |
|
||||
| scheduler.config.scheduler.algorithm | string | `"default"` | Algorithm configuration to use different scheduling algorithms, default configuration supports "default", "ml" and "nt". "default" is the rule-based scheduling algorithm, "ml" is the machine learning scheduling algorithm. It also supports user plugin extension, the algorithm value is "plugin", and the compiled `d7y-scheduler-plugin-evaluator.so` file is added to the dragonfly working directory plugins. |
|
||||
| scheduler.config.scheduler.backToSourceCount | int | `200` | backToSourceCount is the number of peers that a single task allows to download back-to-source. |
|
||||
| scheduler.config.scheduler.gc.hostGCInterval | string | `"6h"` | hostGCInterval is the interval of host gc. |
|
||||
| scheduler.config.scheduler.gc.hostGCInterval | string | `"5m"` | hostGCInterval is the interval of host gc. |
|
||||
| scheduler.config.scheduler.gc.hostTTL | string | `"1h"` | hostTTL is time to live of host. If host announces message to scheduler, then HostTTl will be reset. |
|
||||
| scheduler.config.scheduler.gc.peerGCInterval | string | `"10s"` | peerGCInterval is the interval of peer gc. |
|
||||
| scheduler.config.scheduler.gc.peerTTL | string | `"24h"` | peerTTL is the ttl of peer. If the peer has been downloaded by other peers, then PeerTTL will be reset. |
|
||||
| scheduler.config.scheduler.gc.peerGCInterval | string | `"5m"` | peerGCInterval is the interval of peer gc. |
|
||||
| scheduler.config.scheduler.gc.peerTTL | string | `"720h"` | peerTTL is the ttl of peer. If the peer has been downloaded by other peers, then PeerTTL will be reset. |
|
||||
| scheduler.config.scheduler.gc.pieceDownloadTimeout | string | `"30m"` | pieceDownloadTimeout is the timeout of downloading piece. |
|
||||
| scheduler.config.scheduler.gc.taskGCInterval | string | `"30m"` | taskGCInterval is the interval of task gc. If all the peers have been reclaimed in the task, then the task will also be reclaimed. |
|
||||
| scheduler.config.scheduler.networkTopology.cache.interval | string | `"5m"` | interval is cache cleanup interval. |
|
||||
| scheduler.config.scheduler.networkTopology.cache.ttl | string | `"5m"` | ttl is networkTopology cache items ttl. |
|
||||
| scheduler.config.scheduler.networkTopology.collectInterval | string | `"2h"` | collectInterval is the interval of collecting network topology. |
|
||||
| scheduler.config.scheduler.networkTopology.probe.count | int | `10` | count is the number of probing hosts. |
|
||||
| scheduler.config.scheduler.networkTopology.probe.queueLength | int | `5` | queueLength is the length of probe queue. |
|
||||
| scheduler.config.scheduler.retryBackToSourceLimit | int | `5` | When retryBackToSourceLimit is reached, the peer downloads back-to-source. |
|
||||
| scheduler.config.scheduler.retryInterval | string | `"700ms"` | Retry scheduling interval. |
|
||||
| scheduler.config.scheduler.retryLimit | int | `7` | Retry scheduling limit times. |
|
||||
| scheduler.config.security.autoIssueCert | bool | `false` | AutoIssueCert indicates to issue client certificates for all grpc call. If AutoIssueCert is false, any other option in Security will be ignored. |
|
||||
| scheduler.config.security.caCert | string | `""` | CACert is the root CA certificate for all grpc tls handshake, it can be path or PEM format string. |
|
||||
| scheduler.config.security.certSpec.dnsNames | list | `["dragonfly-scheduler","dragonfly-scheduler.dragonfly-system.svc","dragonfly-scheduler.dragonfly-system.svc.cluster.local"]` | DNSNames is a list of dns names be set on the certificate. |
|
||||
| scheduler.config.security.certSpec.ipAddresses | string | `nil` | IPAddresses is a list of ip addresses be set on the certificate. |
|
||||
| scheduler.config.security.certSpec.validityPeriod | string | `"4320h"` | ValidityPeriod is the validity period of certificate. |
|
||||
| scheduler.config.security.tlsPolicy | string | `"prefer"` | TLSPolicy controls the grpc handshake behaviors: force: both ClientHandshake and ServerHandshake only support tls. prefer: ServerHandshake supports tls and insecure (non-tls), ClientHandshake only supports tls. default: ServerHandshake supports tls and insecure (non-tls), ClientHandshake only supports insecure (non-tls). Notice: if the dragonfly service has already been deployed, a two-step upgrade is required. The first step is to set tlsPolicy to default and upgrade the dragonfly services. The second step is to set tlsPolicy to prefer and then completely upgrade the dragonfly services. |
|
||||
| scheduler.config.security.tlsVerify | bool | `false` | TLSVerify indicates to verify certificates. |
|
||||
| scheduler.config.scheduler.retryBackToSourceLimit | int | `3` | When retryBackToSourceLimit is reached, the peer downloads back-to-source. |
|
||||
| scheduler.config.scheduler.retryInterval | string | `"1s"` | Retry scheduling interval. |
|
||||
| scheduler.config.scheduler.retryLimit | int | `5` | Retry scheduling limit times. |
|
||||
| scheduler.config.seedPeer.enable | bool | `true` | Whether the scheduler enables seed peers as P2P peers. If the value is false, the P2P network will not back-to-source through seed peers but through dfdaemon, and the preheat feature does not work. |
|
||||
| scheduler.config.server.advertiseIP | string | `""` | Advertise ip. |
|
||||
| scheduler.config.server.advertisePort | int | `8002` | Advertise port. |
|
||||
|
@ -531,32 +376,32 @@ helm delete dragonfly --namespace dragonfly-system
|
|||
| scheduler.config.server.dataDir | string | `""` | Storage directory. |
|
||||
| scheduler.config.server.listenIP | string | `"0.0.0.0"` | Listen ip. |
|
||||
| scheduler.config.server.logDir | string | `""` | Log directory. |
|
||||
| scheduler.config.server.logLevel | string | `"info"` | logLevel specifies the logging level for the scheduler. Default: "info" Supported values: "debug", "info", "warn", "error", "panic", "fatal" |
|
||||
| scheduler.config.server.pluginDir | string | `""` | Plugin directory. |
|
||||
| scheduler.config.server.port | int | `8002` | Server port. |
|
||||
| scheduler.config.server.workHome | string | `""` | Work directory. |
|
||||
| scheduler.config.storage.bufferSize | int | `100` | bufferSize sets the size of the buffer container; when the buffer is full, all records in the buffer are written to the file. |
|
||||
| scheduler.config.storage.maxBackups | int | `10` | maxBackups sets the maximum number of storage files to retain. |
|
||||
| scheduler.config.storage.maxSize | int | `100` | maxSize sets the maximum size in megabytes of storage file. |
|
||||
| scheduler.config.verbose | bool | `false` | Whether to enable debug level logger and enable pprof. |
|
||||
| scheduler.config.tracing.protocol | string | `""` | Protocol specifies the communication protocol for the tracing server. Supported values: "http", "https", "grpc" (default: None). This determines how tracing logs are transmitted to the server. |
|
||||
| scheduler.containerPort | int | `8002` | Pod containerPort. |
|
||||
| scheduler.enable | bool | `true` | Enable scheduler. |
|
||||
| scheduler.extraVolumeMounts | list | `[{"mountPath":"/var/log/dragonfly/scheduler","name":"logs"}]` | Extra volumeMounts for scheduler. |
|
||||
| scheduler.extraVolumes | list | `[{"emptyDir":{},"name":"logs"}]` | Extra volumes for scheduler. |
|
||||
| scheduler.fullnameOverride | string | `""` | Override scheduler fullname. |
|
||||
| scheduler.hostAliases | list | `[]` | Host Aliases. |
|
||||
| scheduler.hostNetwork | bool | `false` | hostNetwork specifies whether host network should be enabled. |
|
||||
| scheduler.image.digest | string | `""` | Image digest. |
|
||||
| scheduler.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| scheduler.image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
|
||||
| scheduler.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| scheduler.image.repository | string | `"dragonflyoss/scheduler"` | Image repository. |
|
||||
| scheduler.image.tag | string | `"v2.1.44"` | Image tag. |
|
||||
| scheduler.image.tag | string | `"v2.3.1-rc.2"` | Image tag. |
|
||||
| scheduler.initContainer.image.digest | string | `""` | Image digest. |
|
||||
| scheduler.initContainer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| scheduler.initContainer.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| scheduler.initContainer.image.repository | string | `"busybox"` | Image repository. |
|
||||
| scheduler.initContainer.image.tag | string | `"latest"` | Image tag. |
|
||||
| scheduler.initContainer.resources | object | `{"limits":{"cpu":"2","memory":"4Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| scheduler.maxProcs | string | `""` | maxProcs limits the number of operating system threads that can execute user-level Go code simultaneously by setting the GOMAXPROCS environment variable, refer to https://golang.org/pkg/runtime. |
|
||||
| scheduler.metrics.enable | bool | `false` | Enable scheduler metrics. |
|
||||
| scheduler.metrics.enable | bool | `true` | Enable scheduler metrics. |
|
||||
| scheduler.metrics.enableHost | bool | `false` | Enable host metrics. |
|
||||
| scheduler.metrics.prometheusRule.additionalLabels | object | `{}` | Additional labels. |
|
||||
| scheduler.metrics.prometheusRule.enable | bool | `false` | Enable prometheus rule ref: https://github.com/coreos/prometheus-operator. |
|
||||
|
@ -575,69 +420,78 @@ helm delete dragonfly --namespace dragonfly-system
|
|||
| scheduler.podLabels | object | `{}` | Pod labels. |
|
||||
| scheduler.priorityClassName | string | `""` | Pod priorityClassName. |
|
||||
| scheduler.replicas | int | `3` | Number of Pods to launch. |
|
||||
| scheduler.resources | object | `{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| scheduler.resources | object | `{"limits":{"cpu":"8","memory":"16Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| scheduler.service.annotations | object | `{}` | Service annotations. |
|
||||
| scheduler.service.labels | object | `{}` | Service labels. |
|
||||
| scheduler.service.nodePort | string | `""` | Service nodePort. |
|
||||
| scheduler.service.type | string | `"ClusterIP"` | Service type. |
|
||||
| scheduler.statefulsetAnnotations | object | `{}` | Statefulset annotations. |
|
||||
| scheduler.terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds. |
|
||||
| scheduler.tolerations | list | `[]` | List of node taints to tolerate. |
|
||||
| scheduler.updateStrategy | object | `{}` | Update strategy for replicas. |
|
||||
| seedClient.config.console | bool | `true` | console prints log. |
|
||||
| seedClient.config.download.collectedPieceTimeout | string | `"10s"` | collected_piece_timeout is the timeout for collecting one piece from the parent in the stream. |
|
||||
| seedClient.config.download.concurrentPieceCount | int | `16` | concurrentPieceCount is the number of concurrent pieces to download. |
|
||||
| seedClient.config.download.pieceTimeout | string | `"30s"` | pieceTimeout is the timeout for downloading a piece from source. |
|
||||
| seedClient.config.download.rateLimit | int | `20000000000` | rateLimit is the default rate limit of the download speed in Bps (bytes per second), default is 20GB/s. |
|
||||
| seedClient.config.download.pieceTimeout | string | `"40s"` | pieceTimeout is the timeout for downloading a piece from source. |
|
||||
| seedClient.config.download.rateLimit | string | `"50GiB"` | rateLimit is the default rate limit of the download speed in GiB/MiB/KiB per second, default is 50GiB/s. |
|
||||
| seedClient.config.download.server.requestRateLimit | int | `4000` | request_rate_limit is the rate limit of the download request in the download grpc server, default is 4000 req/s. |
|
||||
| seedClient.config.download.server.socketPath | string | `"/var/run/dragonfly/dfdaemon.sock"` | socketPath is the unix socket path for dfdaemon GRPC service. |
|
||||
| seedClient.config.dynconfig.refreshInterval | string | `"1m"` | refreshInterval is the interval to refresh dynamic configuration from manager. |
|
||||
| seedClient.config.gc.interval | string | `"900s"` | interval is the interval to do gc. |
|
||||
| seedClient.config.gc.policy.distHighThresholdPercent | int | `80` | distHighThresholdPercent is the high threshold percent of the disk usage. If the disk usage is greater than the threshold, dfdaemon will do gc. |
|
||||
| seedClient.config.gc.policy.distLowThresholdPercent | int | `60` | distLowThresholdPercent is the low threshold percent of the disk usage. If the disk usage is less than the threshold, dfdaemon will stop gc. |
|
||||
| seedClient.config.gc.policy.taskTTL | string | `"168h"` | taskTTL is the ttl of the task. |
|
||||
| seedClient.config.gc.policy.taskTTL | string | `"720h"` | taskTTL is the ttl of the task. |
|
||||
| seedClient.config.health.server.port | int | `4003` | port is the port to the health server. |
|
||||
| seedClient.config.host | object | `{"idc":"","location":""}` | host is the host configuration for dfdaemon. |
|
||||
| seedClient.config.log.level | string | `"info"` | Specify the logging level [trace, debug, info, warn, error] |
|
||||
| seedClient.config.manager.addrs | list | `[]` | addrs is manager addresses. |
|
||||
| seedClient.config.manager.addr | string | `""` | addr is manager address. |
|
||||
| seedClient.config.metrics.server.port | int | `4002` | port is the port to the metrics server. |
|
||||
| seedClient.config.proxy.disableBackToSource | bool | `false` | disableBackToSource indicates whether disable to download back-to-source when download failed. |
|
||||
| seedClient.config.proxy.prefetch | bool | `false` | prefetch pre-downloads the full task when downloading with a range request. |
|
||||
| seedClient.config.proxy.readBufferSize | int | `32768` | readBufferSize is the buffer size for reading piece from disk, default is 32KB. |
|
||||
| seedClient.config.proxy.prefetch | bool | `true` | prefetch pre-downloads the full task when downloading with a range request. The `X-Dragonfly-Prefetch` header has higher priority than prefetch in the config: if the value is "true", the range request will prefetch the entire file; if the value is "false", the range request will fetch only the range content. |
|
||||
| seedClient.config.proxy.prefetchRateLimit | string | `"5GiB"` | prefetchRateLimit is the rate limit of prefetching in GiB/MiB/KiB per second, default is 5GiB/s. Prefetch requests have lower priority, so the rate is limited to avoid occupying bandwidth and impacting other download tasks. |
|
||||
| seedClient.config.proxy.readBufferSize | int | `4194304` | readBufferSize is the buffer size for reading piece from disk, default is 4MiB. |
|
||||
| seedClient.config.proxy.registryMirror.addr | string | `"https://index.docker.io"` | addr is the default address of the registry mirror. The proxy starts a registry mirror service so clients can pull images through it. Clients use this default address from the configuration unless the `X-Dragonfly-Registry` header is set, which overrides the default registry mirror address. |
|
||||
| seedClient.config.proxy.rules | list | `[{"regex":"blobs/sha256.*"}]` | rules is the list of rules for the proxy server. regex is the regex of the request url. useTLS indicates whether to use tls for the proxy backend. redirect is the redirect url. filteredQueryParams is the filtered query params to generate the task id. When filter is ["Signature", "Expires", "ns"], for example, http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io will generate the same task id. The default value includes the filtered query params of s3, gcs, oss, obs, cos. |
|
||||
| seedClient.config.proxy.rules | list | `[{"regex":"blobs/sha256.*"}]` | rules is the list of rules for the proxy server. regex is the regex of the request url. useTLS indicates whether to use tls for the proxy backend. redirect is the redirect url. filteredQueryParams is the filtered query params to generate the task id. When filter is ["Signature", "Expires", "ns"], for example, http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io will generate the same task id. The default value includes the filtered query params of s3, gcs, oss, obs, cos. The `X-Dragonfly-Use-P2P` header can be used instead of the regular expressions in the rules: if the value is "true", the request will use P2P technology to distribute the content; if the value is "false" but the url matches a regular expression in the rules, the request will also use P2P technology to distribute the content (see the example below). |
|
||||
| seedClient.config.proxy.server.port | int | `4001` | port is the port to the proxy server. |
|
||||
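As an illustration of the `X-Dragonfly-Registry` and `X-Dragonfly-Use-P2P` headers described above, a sketch of a request through the seed client proxy. The 4001 port matches seedClient.config.proxy.server.port above; the registry host and blob digest are placeholders:

```bash
# Sketch: force P2P distribution and target a non-default registry through the
# seed client proxy.
curl -x 127.0.0.1:4001 \
  -H 'X-Dragonfly-Use-P2P: true' \
  -H 'X-Dragonfly-Registry: https://harbor.example.com' \
  -o layer.blob \
  'http://harbor.example.com/v2/library/nginx/blobs/sha256:0123456789abcdef'
```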
| seedClient.config.scheduler.announceInterval | string | `"1m"` | announceInterval is the interval to announce peer to the scheduler. Announcer will provide the scheduler with peer information for scheduling, peer information includes cpu, memory, etc. |
|
||||
| seedClient.config.scheduler.maxScheduleCount | int | `5` | maxScheduleCount is the max count of schedule. |
|
||||
| seedClient.config.scheduler.scheduleTimeout | string | `"30s"` | scheduleTimeout is the timeout for scheduling. If the scheduling timesout, dfdaemon will back-to-source download if enableBackToSource is true, otherwise dfdaemon will return download failed. |
|
||||
| seedClient.config.security.enable | bool | `false` | enable indicates whether enable security. |
|
||||
| seedClient.config.scheduler.scheduleTimeout | string | `"3h"` | scheduleTimeout is timeout for the scheduler to respond to a scheduling request from dfdaemon, default is 3 hours. If the scheduler's response time for a scheduling decision exceeds this timeout, dfdaemon will encounter a `TokioStreamElapsed(Elapsed(()))` error. Behavior upon timeout: - If `enable_back_to_source` is `true`, dfdaemon will attempt to download directly from the source. - Otherwise (if `enable_back_to_source` is `false`), dfdaemon will report a download failure. **Important Considerations Regarding Timeout Triggers**: This timeout isn't solely for the scheduler's direct response. It can also be triggered if the overall duration of the client's interaction with the scheduler for a task (e.g., client downloading initial pieces and reporting their status back to the scheduler) exceeds `schedule_timeout`. During such client-side processing and reporting, the scheduler might be awaiting these updates before sending its comprehensive scheduling response, and this entire period is subject to the `schedule_timeout`. **Configuration Guidance**: To prevent premature timeouts, `schedule_timeout` should be configured to a value greater than the maximum expected time for the *entire scheduling interaction*. This includes: 1. The scheduler's own processing and response time. 2. The time taken by the client to download any initial pieces and download all pieces finished, as this communication is part of the scheduling phase. Setting this value too low can lead to `TokioStreamElapsed` errors even if the network and scheduler are functioning correctly but the combined interaction time is longer than the configured timeout. |
|
||||
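A minimal sketch of the scheduler section assembled from the keys documented in this table (values are the documented defaults):

```yaml
seedClient:
  config:
    scheduler:
      # Interval at which the peer announces its cpu, memory, etc. to the scheduler.
      announceInterval: 1m
      # Maximum number of schedule attempts per task.
      maxScheduleCount: 5
      # Upper bound on the whole scheduling interaction; on timeout dfdaemon
      # either downloads back-to-source or reports a download failure.
      scheduleTimeout: 3h
```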
| seedClient.config.seedPeer.clusterID | int | `1` | clusterID is the cluster id of the seed peer cluster. |
|
||||
| seedClient.config.seedPeer.enable | bool | `true` | enable indicates whether enable seed peer. |
|
||||
| seedClient.config.seedPeer.keepaliveInterval | string | `"15s"` | keepaliveInterval is the interval to keep alive with manager. |
|
||||
| seedClient.config.seedPeer.type | string | `"super"` | type is the type of seed peer. |
|
||||
| seedClient.config.server.cacheDir | string | `"/var/cache/dragonfly/dfdaemon/"` | cacheDir is the directory to store cache files. |
|
||||
| seedClient.config.server.pluginDir | string | `"/var/lib/dragonfly/plugins/dfdaemon/"` | pluginDir is the directory to store plugins. |
|
||||
| seedClient.config.server.pluginDir | string | `"/usr/local/lib/dragonfly/plugins/dfdaemon/"` | pluginDir is the directory to store plugins. |
|
||||
| seedClient.config.stats.server.port | int | `4004` | port is the port to the stats server. |
|
||||
| seedClient.config.storage.dir | string | `"/var/lib/dragonfly/"` | dir is the directory to store task's metadata and content. |
|
||||
| seedClient.config.storage.readBufferSize | int | `131072` | readBufferSize is the buffer size for reading a piece from disk, default is 128KB. |
|
||||
| seedClient.config.storage.writeBufferSize | int | `131072` | writeBufferSize is the buffer size for writing a piece to disk, default is 128KB. |
|
||||
| seedClient.config.upload.rateLimit | int | `20000000000` | rateLimit is the default rate limit of the upload speed in bytes per second, default is 20GB/s. |
|
||||
| seedClient.config.storage.keep | bool | `true` | keep indicates whether keep the task's metadata and content when the dfdaemon restarts. |
|
||||
| seedClient.config.storage.readBufferSize | int | `4194304` | readBufferSize is the buffer size for reading a piece from disk, default is 4MiB. |
|
||||
| seedClient.config.storage.writeBufferSize | int | `4194304` | writeBufferSize is the buffer size for writing a piece to disk, default is 4MiB. |
|
||||
| seedClient.config.storage.writePieceTimeout | string | `"30s"` | writePieceTimeout is the timeout for writing a piece to storage(e.g., disk or cache). |
|
||||
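Putting the storage keys above together, a hedged sketch with the documented defaults:

```yaml
seedClient:
  config:
    storage:
      # Directory for task metadata and content.
      dir: /var/lib/dragonfly/
      # Keep metadata and content across dfdaemon restarts.
      keep: true
      # 4 MiB buffers for piece reads and writes.
      readBufferSize: 4194304
      writeBufferSize: 4194304
      # Fail piece writes that take longer than this.
      writePieceTimeout: 30s
```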
| seedClient.config.tracing.protocol | string | `""` | Protocol specifies the communication protocol for the tracing server. Supported values: "http", "https", "grpc" (default: None). This determines how tracing logs are transmitted to the server. |
|
||||
| seedClient.config.upload.rateLimit | string | `"50GiB"` | rateLimit is the default rate limit of the upload speed in GiB/MiB/KiB per second, default is 50GiB/s (see the example below). |
|
||||
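A short sketch of the upload settings using the documented defaults; the human-readable byte-size string is interpreted per second:

```yaml
seedClient:
  config:
    upload:
      # Accepts GiB/MiB/KiB suffixes, applied per second.
      rateLimit: 50GiB
      server:
        # gRPC port used to serve pieces to other peers.
        port: 4000
```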
| seedClient.config.upload.server.port | int | `4000` | port is the port to the grpc server. |
|
||||
| seedClient.config.verbose | bool | `false` | verbose prints logs. |
|
||||
| seedClient.enable | bool | `false` | Enable seed client. |
|
||||
| seedClient.config.upload.server.requestRateLimit | int | `4000` | requestRateLimit is the rate limit of upload requests in the upload grpc server, default is 4000 req/s. |
|
||||
| seedClient.enable | bool | `true` | Enable seed client. |
|
||||
| seedClient.extraVolumeMounts | list | `[{"mountPath":"/var/log/dragonfly/dfdaemon/","name":"logs"}]` | Extra volumeMounts for dfdaemon. |
|
||||
| seedClient.extraVolumes | list | `[{"emptyDir":{},"name":"logs"}]` | Extra volumes for dfdaemon. |
|
||||
| seedClient.fullnameOverride | string | `""` | Override seed client fullname. |
|
||||
| seedClient.hostAliases | list | `[]` | Host Aliases. |
|
||||
| seedClient.hostNetwork | bool | `false` | hostNetwork specifies whether host network should be enabled. |
|
||||
| seedClient.image.digest | string | `""` | Image digest. |
|
||||
| seedClient.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| seedClient.image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
|
||||
| seedClient.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| seedClient.image.repository | string | `"dragonflyoss/client"` | Image repository. |
|
||||
| seedClient.image.tag | string | `"v0.1.62"` | Image tag. |
|
||||
| seedClient.image.tag | string | `"v1.0.9"` | Image tag. |
|
||||
| seedClient.initContainer.image.digest | string | `""` | Image digest. |
|
||||
| seedClient.initContainer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| seedClient.initContainer.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| seedClient.initContainer.image.repository | string | `"busybox"` | Image repository. |
|
||||
| seedClient.initContainer.image.tag | string | `"latest"` | Image tag. |
|
||||
| seedClient.initContainer.resources | object | `{"limits":{"cpu":"2","memory":"4Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| seedClient.maxProcs | string | `""` | maxProcs limits the number of operating system threads that can execute user-level Go code simultaneously by setting the GOMAXPROCS environment variable; refer to https://golang.org/pkg/runtime. |
|
||||
| seedClient.metrics.enable | bool | `false` | Enable seed client metrics. |
|
||||
| seedClient.metrics.enable | bool | `true` | Enable seed client metrics. |
|
||||
| seedClient.metrics.prometheusRule.additionalLabels | object | `{}` | Additional labels. |
|
||||
| seedClient.metrics.prometheusRule.enable | bool | `false` | Enable prometheus rule ref: https://github.com/coreos/prometheus-operator. |
|
||||
| seedClient.metrics.prometheusRule.rules | list | `[{"alert":"SeedClientDown","annotations":{"message":"Seed client instance {{ \"{{ $labels.instance }}\" }} is down","summary":"Seed client instance is down"},"expr":"sum(dragonfly_client_version{container=\"seed-client\"}) == 0","for":"5m","labels":{"severity":"critical"}},{"alert":"SeedClientHighNumberOfFailedDownloadTask","annotations":{"message":"Seed client has a high number of failed download task","summary":"Seed client has a high number of failed download task"},"expr":"sum(irate(dragonfly_client_download_task_failure_total{container=\"seed-client\"}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"SeedClientSuccessRateOfDownloadingTask","annotations":{"message":"Seed client's success rate of downloading task is low","summary":"Seed client's success rate of downloading task is low"},"expr":"(sum(rate(dragonfly_client_download_task_total{container=\"seed-client\"}[1m])) - sum(rate(dragonfly_client_download_task_failure_total{container=\"seed-client\"}[1m]))) / sum(rate(dragonfly_client_download_task_total{container=\"seed-client\"}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}}]` | Prometheus rules. |
|
||||
|
@ -654,218 +508,24 @@ helm delete dragonfly --namespace dragonfly-system
|
|||
| seedClient.persistence.accessModes | list | `["ReadWriteOnce"]` | Persistence access modes. |
|
||||
| seedClient.persistence.annotations | object | `{}` | Persistence annotations. |
|
||||
| seedClient.persistence.enable | bool | `true` | Enable persistence for seed peer. |
|
||||
| seedClient.persistence.size | string | `"50Gi"` | Persistence size. |
|
||||
| seedClient.persistence.size | string | `"100Gi"` | Persistence size (see the example below). |
|
||||
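For reference, a hedged sketch of the persistence block with the defaults shown above, including the updated 100Gi size:

```yaml
seedClient:
  persistence:
    # Provision a PersistentVolumeClaim per seed client replica.
    enable: true
    size: 100Gi
    accessModes:
      - ReadWriteOnce
    annotations: {}
```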
| seedClient.podAnnotations | object | `{}` | Pod annotations. |
|
||||
| seedClient.podLabels | object | `{}` | Pod labels. |
|
||||
| seedClient.priorityClassName | string | `""` | Pod priorityClassName. |
|
||||
| seedClient.replicas | int | `3` | Number of Pods to launch. |
|
||||
| seedClient.resources | object | `{"limits":{"cpu":"2","memory":"4Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| seedClient.resources | object | `{"limits":{"cpu":"8","memory":"16Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| seedClient.service.annotations | object | `{}` | Service annotations. |
|
||||
| seedClient.service.labels | object | `{}` | Service labels. |
|
||||
| seedClient.service.nodePort | string | `""` | Service nodePort. |
|
||||
| seedClient.service.type | string | `"ClusterIP"` | Service type. |
|
||||
| seedClient.statefulsetAnnotations | object | `{}` | Statefulset annotations. |
|
||||
| seedClient.terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds. |
|
||||
| seedClient.tolerations | list | `[]` | List of node taints to tolerate. |
|
||||
| seedClient.updateStrategy | object | `{}` | Update strategy for replicas. |
|
||||
| seedPeer.config.aliveTime | string | `"0s"` | Daemon alive time; when set to 0s, the daemon will not auto exit, which is useful for long-running deployments. |
|
||||
| seedPeer.config.announcer.schedulerInterval | string | `"30s"` | schedulerInterval is the interval of announcing scheduler. Announcer will provide the scheduler with peer information for scheduling. Peer information includes cpu, memory, etc. |
|
||||
| seedPeer.config.cacheDir | string | `""` | Dynconfig cache directory. |
|
||||
| seedPeer.config.console | bool | `false` | Console shows log on console. |
|
||||
| seedPeer.config.dataDir | string | `"/var/lib/dragonfly"` | Daemon data storage directory. |
|
||||
| seedPeer.config.download.calculateDigest | bool | `false` | Calculate digest; when only pulling images, this can be set to false to save cpu and memory. |
|
||||
| seedPeer.config.download.downloadGRPC.security | object | `{"insecure":true,"tlsVerify":true}` | Download grpc security option. |
|
||||
| seedPeer.config.download.downloadGRPC.unixListen | object | `{"socket":""}` | Download service listen address. Currently, only unix domain sockets are supported. |
|
||||
| seedPeer.config.download.peerGRPC.security | object | `{"insecure":true}` | Peer grpc security option. |
|
||||
| seedPeer.config.download.peerGRPC.tcpListen.port | int | `65000` | Listen port. |
|
||||
| seedPeer.config.download.perPeerRateLimit | string | `"1024Mi"` | Per peer task limit per second[B]. |
|
||||
| seedPeer.config.download.prefetch | bool | `false` | When requesting data with a range header, prefetch the data not in the range. |
|
||||
| seedPeer.config.download.totalRateLimit | string | `"2048Mi"` | Total download limit per second[B]. |
|
||||
| seedPeer.config.gcInterval | string | `"1m0s"` | Daemon gc task running interval. |
|
||||
| seedPeer.config.health.path | string | `"/server/ping"` | |
|
||||
| seedPeer.config.health.tcpListen.port | int | `40901` | |
|
||||
| seedPeer.config.host.idc | string | `""` | IDC deployed by daemon. |
|
||||
| seedPeer.config.host.location | string | `""` | Geographical location, separated by "|" characters. |
|
||||
| seedPeer.config.jaeger | string | `""` | |
|
||||
| seedPeer.config.keepStorage | bool | `false` | When the daemon exits, keep peer task data or not. It is useful when upgrading the daemon service, since all local cache will be saved. Default is false. |
|
||||
| seedPeer.config.logDir | string | `""` | Log directory. |
|
||||
| seedPeer.config.network.enableIPv6 | bool | `false` | enableIPv6 enables ipv6. |
|
||||
| seedPeer.config.networkTopology.enable | bool | `false` | Enable networkTopology service. |
|
||||
| seedPeer.config.networkTopology.probe.interval | string | `"20m"` | interval is the interval of probing hosts. |
|
||||
| seedPeer.config.objectStorage.enable | bool | `false` | Enable object storage service. |
|
||||
| seedPeer.config.objectStorage.filter | string | `"Expires&Signature&ns"` | Filter is used to generate a unique Task ID by filtering unnecessary query params in the URL, separated by the & character. When filter is "Expires&Signature&ns", for example, http://localhost/xyz?Expires=111&Signature=222&ns=docker.io and http://localhost/xyz?Expires=333&Signature=999&ns=docker.io are the same task. |
|
||||
| seedPeer.config.objectStorage.maxReplicas | int | `3` | MaxReplicas is the maximum number of replicas of an object cache in seed peers. |
|
||||
| seedPeer.config.objectStorage.security | object | `{"insecure":true,"tlsVerify":true}` | Object storage service security option. |
|
||||
| seedPeer.config.objectStorage.tcpListen.port | int | `65004` | Listen port. |
|
||||
| seedPeer.config.pluginDir | string | `""` | Plugin directory. |
|
||||
| seedPeer.config.pprofPort | int | `-1` | Listen port for pprof, only valid when the verbose option is true; default is -1. If it is 0, pprof will use a random port. |
|
||||
| seedPeer.config.proxy.defaultFilter | string | `"Expires&Signature&ns"` | Filter for the hash url. When defaultFilter is "Expires&Signature&ns", for example, http://localhost/xyz?Expires=111&Signature=222&ns=docker.io and http://localhost/xyz?Expires=333&Signature=999&ns=docker.io are the same task. It is also possible to override the default filter by adding the X-Dragonfly-Filter header through the proxy. |
|
||||
| seedPeer.config.proxy.defaultTag | string | `""` | Tag the task. When the value of the default tag is different, the same download url can be divided into different tasks according to the tag. It is also possible to override the default tag by adding the X-Dragonfly-Tag header through the proxy (see the example below). |
|
||||
| seedPeer.config.proxy.proxies[0] | object | `{"regx":"blobs/sha256.*"}` | Proxy all http image layer download requests with dfget. |
|
||||
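A minimal sketch of the seed peer proxy section built from the keys above; the per-request headers X-Dragonfly-Filter and X-Dragonfly-Tag override defaultFilter and defaultTag respectively:

```yaml
seedPeer:
  config:
    proxy:
      # Query params stripped when hashing the url into a task id.
      defaultFilter: "Expires&Signature&ns"
      # Different tags split the same url into different tasks.
      defaultTag: ""
      proxies:
        # Proxy http image layer downloads with dfget.
        - regx: blobs/sha256.*
      registryMirror:
        url: https://index.docker.io
        # Use the X-Dragonfly-Registry header for the remote instead of the url host.
        dynamic: true
```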
| seedPeer.config.proxy.registryMirror.dynamic | bool | `true` | When enabled, use value of "X-Dragonfly-Registry" in http header for remote instead of url host. |
|
||||
| seedPeer.config.proxy.registryMirror.insecure | bool | `false` | When the cert of the above url cannot be verified, set insecure to true. |
|
||||
| seedPeer.config.proxy.registryMirror.url | string | `"https://index.docker.io"` | URL for the registry mirror. |
|
||||
| seedPeer.config.proxy.security | object | `{"insecure":true,"tlsVerify":false}` | Proxy security option. |
|
||||
| seedPeer.config.proxy.tcpListen.namespace | string | `"/run/dragonfly/net"` | Namespace stands for the linux net namespace, like /proc/1/ns/net. It's useful for running the daemon in a pod with an allocated ip while listening on the special port in the host net namespace. Linux only. |
|
||||
| seedPeer.config.scheduler | object | `{"disableAutoBackSource":false,"manager":{"enable":true,"netAddrs":null,"refreshInterval":"10m","seedPeer":{"clusterID":1,"enable":true,"keepAlive":{"interval":"5s"},"type":"super"}},"scheduleTimeout":"30s"}` | Scheduler config, netAddrs is auto-configured in templates/dfdaemon/dfdaemon-configmap.yaml. |
|
||||
| seedPeer.config.scheduler.disableAutoBackSource | bool | `false` | Disable auto back source in dfdaemon. |
|
||||
| seedPeer.config.scheduler.manager.enable | bool | `true` | Get scheduler list dynamically from manager. |
|
||||
| seedPeer.config.scheduler.manager.netAddrs | string | `nil` | Manager service address, netAddr is a list, there are two fields type and addr. |
|
||||
| seedPeer.config.scheduler.manager.refreshInterval | string | `"10m"` | Scheduler list refresh interval. |
|
||||
| seedPeer.config.scheduler.manager.seedPeer.clusterID | int | `1` | Associated seed peer cluster id. |
|
||||
| seedPeer.config.scheduler.manager.seedPeer.enable | bool | `true` | Enable seed peer mode. |
|
||||
| seedPeer.config.scheduler.manager.seedPeer.keepAlive.interval | string | `"5s"` | Manager keepalive interval. |
|
||||
| seedPeer.config.scheduler.manager.seedPeer.type | string | `"super"` | Seed peer supports "super", "strong" and "weak" types. |
|
||||
| seedPeer.config.scheduler.scheduleTimeout | string | `"30s"` | Schedule timeout. |
|
||||
| seedPeer.config.security.autoIssueCert | bool | `false` | AutoIssueCert indicates to issue client certificates for all grpc calls. If AutoIssueCert is false, any other option in Security will be ignored. |
|
||||
| seedPeer.config.security.caCert | string | `""` | CACert is the root CA certificate for all grpc tls handshakes; it can be a path or a PEM format string. |
|
||||
| seedPeer.config.security.certSpec.dnsNames | list | `["dragonfly-seed-peer","dragonfly-seed-peer.dragonfly-system.svc","dragonfly-seed-peer.dragonfly-system.svc.cluster.local"]` | DNSNames is a list of dns names to be set on the certificate. |
|
||||
| seedPeer.config.security.certSpec.ipAddresses | string | `nil` | IPAddresses is a list of ip addresses to be set on the certificate. |
|
||||
| seedPeer.config.security.certSpec.validityPeriod | string | `"4320h"` | ValidityPeriod is the validity period of the certificate. |
|
||||
| seedPeer.config.security.tlsPolicy | string | `"prefer"` | TLSPolicy controls the grpc handshake behaviors: force: both ClientHandshake and ServerHandshake only support tls. prefer: ServerHandshake supports tls and insecure (non-tls), ClientHandshake will only support tls. default: ServerHandshake supports tls and insecure (non-tls), ClientHandshake will only support insecure (non-tls). Notice: If the dragonfly service has been deployed, a two-step upgrade is required. The first step is to set tlsPolicy to default, and then upgrade the dragonfly services. The second step is to set tlsPolicy to prefer, and then completely upgrade the dragonfly services (see the example below). |
|
||||
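A hedged sketch of the two-step tlsPolicy upgrade described above, expressed as values overrides; apply step 1 and upgrade every dragonfly service, then apply step 2 and upgrade again:

```yaml
# Step 1: keep insecure handshakes accepted while components are upgraded.
seedPeer:
  config:
    security:
      tlsPolicy: default

# Step 2 (after all services have been upgraded once): switch back to prefer.
# seedPeer:
#   config:
#     security:
#       tlsPolicy: prefer
```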
| seedPeer.config.security.tlsVerify | bool | `false` | TLSVerify indicates to verify certificates. |
|
||||
| seedPeer.config.storage.diskGCThresholdPercent | int | `90` | Disk GC threshold percent; when the disk usage is above 90%, start to gc the oldest tasks. |
|
||||
| seedPeer.config.storage.multiplex | bool | `true` | Set to true for reusing underlying storage for the same task id. |
|
||||
| seedPeer.config.storage.strategy | string | `"io.d7y.storage.v2.simple"` | Storage strategy when processing task data. io.d7y.storage.v2.simple: download the file to the data directory first, then copy it to the output path; this is the default action. The downloaded file in the data directory will be the peer data for uploading to other peers. io.d7y.storage.v2.advance: download the file directly to the output path with a postfix and hard link it to the final output, avoiding the copy to the output path; faster than the simple strategy, but the output file with the postfix will be the peer data for uploading to other peers, and when the user deletes or changes this file, this peer data will be corrupted. Default is io.d7y.storage.v2.advance (see the example below). |
|
||||
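A minimal sketch of the seed peer storage section with the defaults documented in this table:

```yaml
seedPeer:
  config:
    storage:
      # simple: download into the data directory, then copy to the output path.
      # advance: download straight to the output path and hard link the result.
      strategy: io.d7y.storage.v2.simple
      # Reuse the underlying storage for the same task id.
      multiplex: true
      # GC a task after it has gone unaccessed for this long.
      taskExpireTime: 6h
      # Start gc of the oldest tasks when disk usage exceeds this percentage.
      diskGCThresholdPercent: 90
```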
| seedPeer.config.storage.taskExpireTime | string | `"6h"` | Task data expire time. When a task's data has not been accessed within this time, the task will be gc'd. |
|
||||
| seedPeer.config.upload.rateLimit | string | `"2048Mi"` | Upload limit per second[B]. |
|
||||
| seedPeer.config.upload.security | object | `{"insecure":true,"tlsVerify":false}` | Upload grpc security option. |
|
||||
| seedPeer.config.upload.tcpListen.port | int | `65002` | Listen port. |
|
||||
| seedPeer.config.verbose | bool | `false` | Whether to enable debug level logger and enable pprof. |
|
||||
| seedPeer.config.workHome | string | `""` | Work directory. |
|
||||
| seedPeer.enable | bool | `true` | Enable dfdaemon seed peer. |
|
||||
| seedPeer.extraVolumeMounts | list | `[{"mountPath":"/var/log/dragonfly/daemon","name":"logs"}]` | Extra volumeMounts for dfdaemon. |
|
||||
| seedPeer.extraVolumes | list | `[{"emptyDir":{},"name":"logs"}]` | Extra volumes for dfdaemon. |
|
||||
| seedPeer.fullnameOverride | string | `""` | Override seed peer fullname. |
|
||||
| seedPeer.hostAliases | list | `[]` | Host Aliases. |
|
||||
| seedPeer.hostNetwork | bool | `false` | hostNetwork specifies whether host network should be enabled for the peer pod. |
|
||||
| seedPeer.image.digest | string | `""` | Image digest. |
|
||||
| seedPeer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| seedPeer.image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
|
||||
| seedPeer.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| seedPeer.image.repository | string | `"dragonflyoss/dfdaemon"` | Image repository. |
|
||||
| seedPeer.image.tag | string | `"v2.1.44"` | Image tag. |
|
||||
| seedPeer.initContainer.image.digest | string | `""` | Image digest. |
|
||||
| seedPeer.initContainer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| seedPeer.initContainer.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| seedPeer.initContainer.image.repository | string | `"busybox"` | Image repository. |
|
||||
| seedPeer.initContainer.image.tag | string | `"latest"` | Image tag. |
|
||||
| seedPeer.maxProcs | string | `""` | maxProcs limits the number of operating system threads that can execute user-level Go code simultaneously by setting the GOMAXPROCS environment variable; refer to https://golang.org/pkg/runtime. |
|
||||
| seedPeer.metrics.enable | bool | `false` | Enable seed peer metrics. |
|
||||
| seedPeer.metrics.prometheusRule.additionalLabels | object | `{}` | Additional labels. |
|
||||
| seedPeer.metrics.prometheusRule.enable | bool | `false` | Enable prometheus rule ref: https://github.com/coreos/prometheus-operator. |
|
||||
| seedPeer.metrics.prometheusRule.rules | list | `[{"alert":"SeedPeerDown","annotations":{"message":"Seed peer instance {{ \"{{ $labels.instance }}\" }} is down","summary":"Seed peer instance is down"},"expr":"sum(dragonfly_dfdaemon_version{container=\"seed-peer\"}) == 0","for":"5m","labels":{"severity":"critical"}},{"alert":"SeedPeerHighNumberOfFailedDownloadTask","annotations":{"message":"Seed peer has a high number of failed download task","summary":"Seed peer has a high number of failed download task"},"expr":"sum(irate(dragonfly_dfdaemon_seed_peer_download_failure_total{}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"SeedPeerSuccessRateOfDownloadingTask","annotations":{"message":"Seed peer's success rate of downloading task is low","summary":"Seed peer's success rate of downloading task is low"},"expr":"(sum(rate(dragonfly_dfdaemon_seed_peer_download_total{}[1m])) - sum(rate(dragonfly_dfdaemon_seed_peer_download_failure_total{}[1m]))) / sum(rate(dragonfly_dfdaemon_seed_peer_download_total{}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}},{"alert":"SeedPeerHighNumberOfFailedGRPCRequest","annotations":{"message":"Seed peer has a high number of failed grpc request","summary":"Seed peer has a high number of failed grpc request"},"expr":"sum(rate(grpc_server_started_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\"}[1m])) - sum(rate(grpc_server_handled_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\",grpc_code=\"OK\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\",grpc_code=\"NotFound\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\",grpc_code=\"PermissionDenied\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\",grpc_code=\"InvalidArgument\"}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"SeedPeerSuccessRateOfGRPCRequest","annotations":{"message":"Seed peer's success rate of grpc request is low","summary":"Seed peer's success rate of grpc request is low"},"expr":"(sum(rate(grpc_server_handled_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\",grpc_code=\"OK\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\",grpc_code=\"NotFound\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\",grpc_code=\"PermissionDenied\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\",grpc_code=\"InvalidArgument\"}[1m]))) / sum(rate(grpc_server_started_total{grpc_service=\"cdnsystem.Seeder\",grpc_type=\"unary\"}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}}]` | Prometheus rules. |
|
||||
| seedPeer.metrics.service.annotations | object | `{}` | Service annotations. |
|
||||
| seedPeer.metrics.service.labels | object | `{}` | Service labels. |
|
||||
| seedPeer.metrics.service.type | string | `"ClusterIP"` | Service type. |
|
||||
| seedPeer.metrics.serviceMonitor.additionalLabels | object | `{}` | Additional labels |
|
||||
| seedPeer.metrics.serviceMonitor.enable | bool | `false` | Enable prometheus service monitor. ref: https://github.com/coreos/prometheus-operator. |
|
||||
| seedPeer.metrics.serviceMonitor.interval | string | `"30s"` | Interval at which metrics should be scraped. |
|
||||
| seedPeer.metrics.serviceMonitor.scrapeTimeout | string | `"10s"` | Timeout after which the scrape is ended. |
|
||||
| seedPeer.name | string | `"seed-peer"` | Seed peer name. |
|
||||
| seedPeer.nameOverride | string | `""` | Override seed peer name. |
|
||||
| seedPeer.nodeSelector | object | `{}` | Node labels for pod assignment. |
|
||||
| seedPeer.persistence.accessModes | list | `["ReadWriteOnce"]` | Persistence access modes. |
|
||||
| seedPeer.persistence.annotations | object | `{}` | Persistence annotations. |
|
||||
| seedPeer.persistence.enable | bool | `true` | Enable persistence for seed peer. |
|
||||
| seedPeer.persistence.size | string | `"8Gi"` | Persistence size. |
|
||||
| seedPeer.podAnnotations | object | `{}` | Pod annotations. |
|
||||
| seedPeer.podLabels | object | `{}` | Pod labels. |
|
||||
| seedPeer.priorityClassName | string | `""` | Pod priorityClassName. |
|
||||
| seedPeer.replicas | int | `3` | Number of Pods to launch. |
|
||||
| seedPeer.resources | object | `{"limits":{"cpu":"2","memory":"4Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| seedPeer.statefulsetAnnotations | object | `{}` | Statefulset annotations. |
|
||||
| seedPeer.terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds. |
|
||||
| seedPeer.tolerations | list | `[]` | List of node taints to tolerate. |
|
||||
| seedPeer.updateStrategy | object | `{}` | Update strategy for replicas. |
|
||||
| trainer.config.console | bool | `false` | Console shows log on console. |
|
||||
| trainer.config.jaeger | string | `""` | |
|
||||
| trainer.config.manager.Addr | string | `"127.0.0.1:65003"` | Manager Service Address. |
|
||||
| trainer.config.network.enableIPv6 | bool | `false` | enableIPv6 enables ipv6. |
|
||||
| trainer.config.pprofPort | int | `-1` | Listen port for pprof, only valid when the verbose option is true. Default is -1. If it is 0, pprof will use a random port. |
|
||||
| trainer.config.security.autoIssueCert | bool | `false` | AutoIssueCert indicates to issue client certificates for all grpc calls. If AutoIssueCert is false, any other option in Security will be ignored. |
|
||||
| trainer.config.security.caCert | string | `""` | CACert is the root CA certificate for all grpc tls handshakes; it can be a path or a PEM format string. |
|
||||
| trainer.config.security.certSpec.dnsNames | list | `["dragonfly-trainer","dragonfly-trainer.dragonfly-system.svc","dragonfly-trainer.dragonfly-system.svc.cluster.local"]` | DNSNames is a list of dns names to be set on the certificate. |
|
||||
| trainer.config.security.certSpec.ipAddresses | string | `nil` | IPAddresses is a list of ip addresses to be set on the certificate. |
|
||||
| trainer.config.security.certSpec.validityPeriod | string | `"4320h"` | ValidityPeriod is the validity period of the certificate. |
|
||||
| trainer.config.security.tlsPolicy | string | `"prefer"` | TLSPolicy controls the grpc handshake behaviors: force: both ClientHandshake and ServerHandshake only support tls. prefer: ServerHandshake supports tls and insecure (non-tls), ClientHandshake will only support tls. default: ServerHandshake supports tls and insecure (non-tls), ClientHandshake will only support insecure (non-tls). Notice: If the dragonfly service has been deployed, a two-step upgrade is required. The first step is to set tlsPolicy to default, and then upgrade the dragonfly services. The second step is to set tlsPolicy to prefer, and then completely upgrade the dragonfly services. |
|
||||
| trainer.config.security.tlsVerify | bool | `false` | TLSVerify indicates to verify certificates. |
|
||||
| trainer.config.server.advertiseIP | string | `""` | Advertise ip. |
|
||||
| trainer.config.server.advertisePort | int | `9090` | Advertise port. |
|
||||
| trainer.config.server.dataDir | string | `""` | Storage directory. |
|
||||
| trainer.config.server.listenIP | string | `"0.0.0.0"` | Listen ip. |
|
||||
| trainer.config.server.logDir | string | `""` | Log directory. |
|
||||
| trainer.config.server.port | int | `9090` | Server port. |
|
||||
| trainer.config.server.workHome | string | `""` | Work directory. |
|
||||
| trainer.config.verbose | bool | `false` | Whether to enable debug level logger and enable pprof. |
|
||||
| trainer.containerPort | int | `9090` | Pod containerPort. |
|
||||
| trainer.deploymentAnnotations | object | `{}` | Deployment annotations. |
|
||||
| trainer.enable | bool | `false` | Enable trainer. |
|
||||
| trainer.extraVolumeMounts | list | `[{"mountPath":"/var/log/dragonfly/trainer","name":"logs"}]` | Extra volumeMounts for trainer. |
|
||||
| trainer.extraVolumes | list | `[{"emptyDir":{},"name":"logs"}]` | Extra volumes for trainer. |
|
||||
| trainer.fullnameOverride | string | `""` | Override trainer fullname. |
|
||||
| trainer.hostAliases | list | `[]` | Host Aliases. |
|
||||
| trainer.image.digest | string | `""` | Image digest. |
|
||||
| trainer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| trainer.image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
|
||||
| trainer.image.registry | string | `"docker.io"` | Image registry. |
|
||||
| trainer.image.repository | string | `"dragonflyoss/trainer"` | Image repository. |
|
||||
| trainer.image.tag | string | `"v2.1.44"` | Image tag. |
|
||||
| trainer.maxProcs | string | `""` | maxProcs limits the number of operating system threads that can execute user-level Go code simultaneously by setting the GOMAXPROCS environment variable; refer to https://golang.org/pkg/runtime. |
|
||||
| trainer.metrics.enable | bool | `false` | Enable trainer metrics. |
|
||||
| trainer.metrics.prometheusRule.additionalLabels | object | `{}` | Additional labels. |
|
||||
| trainer.metrics.prometheusRule.enable | bool | `false` | Enable prometheus rule. ref: https://github.com/coreos/prometheus-operator. |
|
||||
| trainer.metrics.prometheusRule.rules | list | `[{"alert":"TrainerDown","annotations":{"message":"Trainer instance {{ \"{{ $labels.instance }}\" }} is down","summary":"Trainer instance is down"},"expr":"sum(dragonfly_trainer_version{}) == 0","for":"5m","labels":{"severity":"critical"}},{"alert":"TrainerHighNumberOfFailedGRPCRequest","annotations":{"message":"Trainer has a high number of failed grpc request","summary":"Trainer has a high number of failed grpc request"},"expr":"sum(rate(grpc_server_started_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\"}[1m])) - sum(rate(grpc_server_handled_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\",grpc_code=\"OK\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\",grpc_code=\"NotFound\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\",grpc_code=\"PermissionDenied\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\",grpc_code=\"InvalidArgument\"}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"TrainerSuccessRateOfGRPCRequest","annotations":{"message":"Trainer's success rate of grpc request is low","summary":"Trainer's success rate of grpc request is low"},"expr":"(sum(rate(grpc_server_handled_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\",grpc_code=\"OK\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\",grpc_code=\"NotFound\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\",grpc_code=\"PermissionDenied\"}[1m])) + sum(rate(grpc_server_handled_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\",grpc_code=\"InvalidArgument\"}[1m]))) / sum(rate(grpc_server_started_total{grpc_service=\"trainer.Trainer\",grpc_type=\"unary\"}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}},{"alert":"TrainerHighNumberOfFailedRESTRequest","annotations":{"message":"Trainer has a high number of failed rest request","summary":"Trainer has a high number of failed rest request"},"expr":"sum(rate(dragonfly_trainer_requests_total{}[1m])) - sum(rate(dragonfly_trainer_requests_total{code=~\"[12]..\"}[1m])) > 100","for":"1m","labels":{"severity":"warning"}},{"alert":"TrainerSuccessRateOfRESTRequest","annotations":{"message":"Trainer's success rate of rest request is low","summary":"Trainer's success rate of rest request is low"},"expr":"sum(rate(dragonfly_trainer_requests_total{code=~\"[12]..\"}[1m])) / sum(rate(dragonfly_trainer_requests_total{}[1m])) < 0.6","for":"5m","labels":{"severity":"critical"}}]` | Prometheus rules. |
|
||||
| trainer.metrics.service.annotations | object | `{}` | Service annotations. |
|
||||
| trainer.metrics.service.labels | object | `{}` | Service labels. |
|
||||
| trainer.metrics.service.type | string | `"ClusterIP"` | Service type. |
|
||||
| trainer.metrics.serviceMonitor.additionalLabels | object | `{}` | Additional labels. |
|
||||
| trainer.metrics.serviceMonitor.enable | bool | `false` | Enable prometheus service monitor. ref: https://github.com/coreos/prometheus-operator. |
|
||||
| trainer.metrics.serviceMonitor.interval | string | `"30s"` | Interval at which metrics should be scraped. |
|
||||
| trainer.metrics.serviceMonitor.scrapeTimeout | string | `"10s"` | Timeout after which the scrape is ended. |
|
||||
| trainer.name | string | `"trainer"` | trainer name. |
|
||||
| trainer.nameOverride | string | `""` | Override trainer name. |
|
||||
| trainer.nodeSelector | object | `{}` | Node labels for pod assignment. |
|
||||
| trainer.podAnnotations | object | `{}` | Pod annotations. |
|
||||
| trainer.podLabels | object | `{}` | Pod labels. |
|
||||
| trainer.priorityClassName | string | `""` | Pod priorityClassName. |
|
||||
| trainer.replicas | int | `1` | Number of Pods to launch. |
|
||||
| trainer.resources | object | `{"limits":{"cpu":"2","memory":"4Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits. |
|
||||
| trainer.service.annotations | object | `{}` | Service annotations. |
|
||||
| trainer.service.labels | object | `{}` | Service labels. |
|
||||
| trainer.service.type | string | `"ClusterIP"` | Service type. |
|
||||
| trainer.terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds. |
|
||||
| trainer.tolerations | list | `[]` | List of node taints to tolerate. |
|
||||
| trainer.updateStrategy | object | `{"type":"RollingUpdate"}` | Update strategy for replicas. |
|
||||
| triton.aws | object | `{"accessKeyID":"","region":"","secretAccessKey":""}` | Credentials information. |
|
||||
| triton.deploymentAnnotations | object | `{}` | Deployment annotations. |
|
||||
| triton.enable | bool | `false` | Enable triton. |
|
||||
| triton.fullnameOverride | string | `""` | Override triton fullname. |
|
||||
| triton.grpcPort | int | `8001` | GRPC service port. |
|
||||
| triton.hostAliases | list | `[]` | Host Aliases. |
|
||||
| triton.image.digest | string | `""` | Image digest. |
|
||||
| triton.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
|
||||
| triton.image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
|
||||
| triton.image.registry | string | `"nvcr.io"` | Image registry. |
|
||||
| triton.image.repository | string | `"nvidia/tritonserver"` | Image repository. |
|
||||
| triton.image.tag | string | `"23.06-py3"` | Image tag. |
|
||||
| triton.maxProcs | string | `""` | maxProcs limits the number of operating system threads that can execute user-level Go code simultaneously by setting the GOMAXPROCS environment variable; refer to https://golang.org/pkg/runtime. |
|
||||
| triton.modelRepositoryPath | string | `""` | Model repository path. |
|
||||
| triton.name | string | `"triton"` | triton name. |
|
||||
| triton.nameOverride | string | `""` | Override triton name. |
|
||||
| triton.nodeSelector | object | `{}` | Node labels for pod assignment. |
|
||||
| triton.podAnnotations | object | `{}` | Pod annotations. |
|
||||
| triton.priorityClassName | string | `""` | Pod priorityClassName. |
|
||||
| triton.replicas | int | `3` | Number of Pods to launch. |
|
||||
| triton.restPort | int | `8000` | REST service port. |
|
||||
| triton.service.type | string | `"LoadBalancer"` | Service type. |
|
||||
| triton.terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds. |
|
||||
| triton.tolerations | list | `[]` | List of node taints to tolerate. |
|
||||
| triton.updateStrategy | object | `{}` | Update strategy for replicas. |
|
||||
|
||||
## Chart dependencies
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| https://charts.bitnami.com/bitnami | mysql | 9.4.6 |
|
||||
| https://charts.bitnami.com/bitnami | redis | 17.4.3 |
|
||||
| https://jaegertracing.github.io/helm-charts | jaeger | 0.66.1 |
|
||||
| https://charts.bitnami.com/bitnami | mysql | 10.1.1 |
|
||||
| https://charts.bitnami.com/bitnami | redis | 19.5.5 |
|
||||
|
|
|
@ -81,13 +81,12 @@ scheduler:
|
|||
manager:
|
||||
schedulerClusterID: 1
|
||||
|
||||
seedPeer:
|
||||
seedClient:
|
||||
config:
|
||||
scheduler:
|
||||
manager:
|
||||
seedPeer:
|
||||
enable: true
|
||||
clusterID: 1
|
||||
seedPeer:
|
||||
enable: true
|
||||
type: super
|
||||
clusterID: 1
|
||||
|
||||
manager:
|
||||
enable: false
|
||||
|
|
|
@ -1,20 +1,14 @@
|
|||
1. Get the scheduler address by running these commands:
|
||||
1. Get the manager address by running these commands:
|
||||
export MANAGER_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "dragonfly.fullname" . }},release={{ .Release.Name }},component=manager" -o jsonpath={.items[0].metadata.name})
|
||||
export MANAGER_CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $MANAGER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $MANAGER_POD_NAME 8080:$MANAGER_CONTAINER_PORT
|
||||
echo "Visit http://127.0.0.1:8080 to use your manager"
|
||||
|
||||
2. Get the scheduler address by running these commands:
|
||||
export SCHEDULER_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "dragonfly.fullname" . }},release={{ .Release.Name }},component=scheduler" -o jsonpath={.items[0].metadata.name})
|
||||
export SCHEDULER_CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $SCHEDULER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $SCHEDULER_POD_NAME 8002:$SCHEDULER_CONTAINER_PORT
|
||||
echo "Visit http://127.0.0.1:8002 to use your scheduler"
|
||||
|
||||
2. Get the dfdaemon port by running these commands:
|
||||
export DFDAEMON_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "dragonfly.fullname" . }},release={{ .Release.Name }},component=dfdaemon" -o jsonpath={.items[0].metadata.name})
|
||||
export DFDAEMON_CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $DFDAEMON_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
You can use $DFDAEMON_CONTAINER_PORT as a proxy port in Node.
|
||||
|
||||
3. Configure runtime to use dragonfly:
|
||||
https://d7y.io/docs/getting-started/quick-start/kubernetes/
|
||||
|
||||
{{ if .Values.jaeger.enable }}
|
||||
4. Get Jaeger query URL by running these commands:
|
||||
export JAEGER_QUERY_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services {{ .Release.Name }}-jaeger-query -o jsonpath="{.spec.ports[0].port}")
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ .Release.Name }}-jaeger-query 16686:$JAEGER_QUERY_PORT
|
||||
echo "Visit http://127.0.0.1:16686/search?limit=20&lookback=1h&maxDuration&minDuration&service=dragonfly to query download events"
|
||||
{{- end }}
|
||||
|
|
|
@ -6,6 +6,13 @@ Expand the name of the chart.
|
|||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
|
||||
*/}}
|
||||
{{- define "common.names.namespace" -}}
|
||||
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
|
@ -63,38 +70,6 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
|
|||
{{ template "dragonfly.fullname" . }}-dfinit
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified seed peer name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dragonfly.seedPeer.fullname" -}}
|
||||
{{ template "dragonfly.fullname" . }}-{{ .Values.seedPeer.name }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified dfdaemon name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dragonfly.dfdaemon.fullname" -}}
|
||||
{{ template "dragonfly.fullname" . }}-{{ .Values.dfdaemon.name }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified trainer name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dragonfly.trainer.fullname" -}}
|
||||
{{ template "dragonfly.fullname" . }}-{{ .Values.trainer.name }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified triton name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dragonfly.triton.fullname" -}}
|
||||
{{ template "dragonfly.fullname" . }}-{{ .Values.triton.name }}
|
||||
{{- end -}}
|
||||
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Return the proper image name
|
||||
|
@ -156,34 +131,6 @@ Return the proper image name (for the client dfinit image)
|
|||
{{- include "common.images.image" ( dict "imageRoot" .Values.client.dfinit.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the dfdaemon image)
|
||||
*/}}
|
||||
{{- define "dfdaemon.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.dfdaemon.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the seedPeer image)
|
||||
*/}}
|
||||
{{- define "seedPeer.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.seedPeer.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the trainer image)
|
||||
*/}}
|
||||
{{- define "trainer.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.trainer.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the triton image)
|
||||
*/}}
|
||||
{{- define "triton.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.triton.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the manager initContainer image)
|
||||
*/}}
|
||||
|
@ -212,27 +159,6 @@ Return the proper image name (for the seedClient initContainer image)
|
|||
{{- include "common.images.image" ( dict "imageRoot" .Values.seedClient.initContainer.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the dfdaemon initContainer image)
|
||||
*/}}
|
||||
{{- define "dfdaemon.initContainer.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.dfdaemon.initContainer.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the seedPeer initContainer image)
|
||||
*/}}
|
||||
{{- define "seedPeer.initContainer.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.seedPeer.initContainer.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the containerRuntime initContainer image)
|
||||
*/}}
|
||||
{{- define "containerRuntime.initContainer.image" -}}
|
||||
{{- include "common.images.image" ( dict "imageRoot" .Values.containerRuntime.initContainer.image "global" .Values.global ) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Return the proper Storage Class
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.client.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.client.name }}
|
||||
|
@ -20,13 +20,12 @@ data:
|
|||
upload:
|
||||
{{ toYaml .Values.client.config.upload | indent 6 }}
|
||||
manager:
|
||||
addrs:
|
||||
{{- if .Values.client.config.manager.addrs }}
|
||||
{{ toYaml .Values.client.config.manager.addrs | indent 6 }}
|
||||
addr: {{ .Values.client.config.manager.addr }}
|
||||
{{- else if .Values.manager.enable }}
|
||||
- http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
|
||||
addr: http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
|
||||
{{- else }}
|
||||
- {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
|
||||
addr: http://{{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
|
||||
{{- end }}
|
||||
scheduler:
|
||||
{{ toYaml .Values.client.config.scheduler | indent 6 }}
|
||||
|
@ -38,18 +37,12 @@ data:
|
|||
{{ toYaml .Values.client.config.gc | indent 6 }}
|
||||
proxy:
|
||||
{{ toYaml .Values.client.config.proxy | indent 6 }}
|
||||
security:
|
||||
{{ toYaml .Values.client.config.security | indent 6 }}
|
||||
health:
|
||||
{{ toYaml .Values.client.config.health | indent 6 }}
|
||||
metrics:
|
||||
{{ toYaml .Values.client.config.metrics | indent 6 }}
|
||||
stats:
|
||||
{{ toYaml .Values.client.config.stats | indent 6 }}
|
||||
{{- if .Values.client.config.tracing }}
|
||||
tracing:
|
||||
{{ toYaml .Values.client.config.tracing | indent 6 }}
|
||||
{{- else if .Values.jaeger.enable }}
|
||||
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: apps/v1
|
|||
kind: DaemonSet
|
||||
metadata:
|
||||
name: {{ template "dragonfly.client.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.client.name }}
|
||||
|
@ -22,32 +22,33 @@ spec:
|
|||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: "{{ .Values.client.name }}"
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: "{{ .Values.client.name }}"
|
||||
release: {{ .Release.Name }}
|
||||
{{- if .Values.client.podLabels }}
|
||||
{{ toYaml .Values.client.podLabels | indent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/client/client-configmap.yaml") . | sha256sum }}
|
||||
{{- if .Values.client.dfinit.enable }}
|
||||
checksum/dfinit-config: {{ include (print $.Template.BasePath "/client/dfinit-configmap.yaml") . | sha256sum }}
|
||||
{{- end }}
|
||||
{{- if .Values.client.podAnnotations }}
|
||||
{{ toYaml .Values.client.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.client.nodeSelector | default .Values.global.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
hostNetwork: {{ .Values.client.hostNetwork }}
|
||||
{{- if .Values.client.hostNetwork }}
|
||||
dnsPolicy: "ClusterFirstWithHostNet"
|
||||
{{- end }}
|
||||
hostPID: {{ .Values.client.hostPID }}
|
||||
hostIPC: {{ .Values.client.hostIPC }}
|
||||
{{- with .Values.client.nodeSelector | default .Values.global.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.client.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.client.tolerations | indent 8 }}
|
||||
|
@ -59,7 +60,7 @@ spec:
|
|||
{{- if quote .Values.client.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.client.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.client.priorityClassName) }}
|
||||
{{- if .Values.client.priorityClassName }}
|
||||
priorityClassName: {{ .Values.client.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.client.image.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
|
@ -83,9 +84,11 @@ spec:
|
|||
imagePullPolicy: {{ .Values.client.dfinit.image.pullPolicy }}
|
||||
args:
|
||||
- --log-level={{ .Values.client.dfinit.config.log.level }}
|
||||
{{- if .Values.client.dfinit.config.verbose }}
|
||||
- --verbose
|
||||
{{- if .Values.client.dfinit.config.console }}
|
||||
- --console
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.client.initContainer.resources | indent 10 }}
|
||||
volumeMounts:
|
||||
- name: dfinit-config
|
||||
mountPath: "/etc/dragonfly"
|
||||
|
@ -97,8 +100,16 @@ spec:
|
|||
- name: crio-config-dir
|
||||
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.crio.configPath }}
|
||||
{{- end }}
|
||||
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.podman) }}
|
||||
- name: podman-config-dir
|
||||
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.podman.configPath }}
|
||||
{{- end }}
|
||||
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.docker) }}
|
||||
- name: docker-config-dir
|
||||
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.docker.configPath }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.client.dfinit.enable }}
|
||||
{{- if and .Values.client.dfinit.enable .Values.client.dfinit.restartContainerRuntime }}
|
||||
- name: restart-container-runtime
|
||||
image: {{ template "client.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.client.initContainer.image.pullPolicy }}
|
||||
|
@ -115,9 +126,17 @@ spec:
|
|||
{{- else if .Values.client.dfinit.config.containerRuntime.crio }}
|
||||
nsenter -t 1 -m -- systemctl restart crio.service
|
||||
echo "restart cri-o"
|
||||
{{- else if .Values.client.dfinit.config.containerRuntime.podman }}
|
||||
nsenter -t 1 -m -- systemctl restart podman.service
|
||||
echo "restart podman"
|
||||
{{- else if .Values.client.dfinit.config.containerRuntime.docker }}
|
||||
nsenter -t 1 -m -- systemctl restart docker.service
|
||||
echo "restart docker"
|
||||
{{- else }}
|
||||
echo "no container runtime to restart"
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.client.initContainer.resources | indent 10 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: client
|
||||
|
@ -125,11 +144,11 @@ spec:
|
|||
imagePullPolicy: {{ .Values.client.image.pullPolicy | quote }}
|
||||
args:
|
||||
- --log-level={{ .Values.client.config.log.level }}
|
||||
{{- if .Values.client.config.verbose }}
|
||||
- --verbose
|
||||
{{- if .Values.client.config.console }}
|
||||
- --console
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.client.resources | indent 12 }}
|
||||
{{ toYaml .Values.client.resources | indent 10 }}
|
||||
env:
|
||||
{{- if .Values.client.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
|
@ -146,17 +165,21 @@ spec:
|
|||
protocol: TCP
|
||||
readinessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.client.config.upload.server.port }}"]
|
||||
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.client.config.download.server.socketPath }}"]
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
livenessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.client.config.upload.server.port }}"]
|
||||
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.client.config.download.server.socketPath }}"]
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 10
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: "/etc/dragonfly"
|
||||
- name: socket-dir
|
||||
mountPath: /var/run/dragonfly
|
||||
{{- if .Values.client.extraVolumeMounts }}
|
||||
{{- toYaml .Values.client.extraVolumeMounts | nindent 8 }}
|
||||
{{- end }}
|
||||
|
@ -164,6 +187,10 @@ spec:
|
|||
- name: config
|
||||
configMap:
|
||||
name: {{ template "dragonfly.client.fullname" . }}
|
||||
- name: socket-dir
|
||||
hostPath:
|
||||
path: /var/run/dragonfly
|
||||
type: DirectoryOrCreate
|
||||
{{- if .Values.client.dfinit.enable }}
|
||||
- name: dfinit-config
|
||||
configMap:
|
||||
|
@ -181,6 +208,18 @@ spec:
|
|||
path: {{ dir .Values.client.dfinit.config.containerRuntime.crio.configPath }}
|
||||
type: DirectoryOrCreate
|
||||
{{- end }}
|
||||
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.podman) }}
|
||||
- name: podman-config-dir
|
||||
hostPath:
|
||||
path: {{ dir .Values.client.dfinit.config.containerRuntime.podman.configPath }}
|
||||
type: DirectoryOrCreate
|
||||
{{- end }}
|
||||
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.docker) }}
|
||||
- name: docker-config-dir
|
||||
hostPath:
|
||||
path: {{ dir .Values.client.dfinit.config.containerRuntime.docker.configPath }}
|
||||
type: DirectoryOrCreate
|
||||
{{- end }}
|
||||
{{- if .Values.client.extraVolumes }}
|
||||
{{- toYaml .Values.client.extraVolumes | nindent 6 }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.dfinit.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.client.name }}
|
||||
|
|
|
@ -1,16 +1,16 @@
|
|||
{{- if and .Values.client.metrics.enable .Values.client.metrics.serviceMonitor.enable }}
|
||||
{{- if .Values.client.metrics.enable }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.client.fullname" . }}-metrics
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.client.name }}-metrics
|
||||
{{- if .Values.client.metrics.service.labels }}
|
||||
{{ toYaml .Values.metrics.service.labels | indent 4 }}
|
||||
{{ toYaml .Values.client.metrics.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.client.metrics.service.annotations }}
|
||||
annotations:
|
||||
|
@ -21,10 +21,10 @@ spec:
|
|||
ports:
|
||||
- port: {{ .Values.client.config.metrics.server.port }}
|
||||
name: http-metrics
|
||||
targetPort: {{ .Values.client.config.metrics.server.port }}
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.client.config.metrics.server.port }}
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.client.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
|
|
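Note the gating change above: the client metrics Service is now rendered whenever client.metrics.enable is true, and its extra labels come from client.metrics.service.labels rather than the old metrics.service.labels lookup. A hedged values example:

    client:
      metrics:
        enable: true
        service:
          labels:
            team: infra          # illustrative label only
        serviceMonitor:
          enable: false          # no longer required for the Service itself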
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.client.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.client.name }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.client.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.client.name }}
|
||||
|
@ -27,6 +27,5 @@ spec:
|
|||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.client.name }}-metrics
|
||||
{{- end }}
|
||||
|
|
|
@ -1,109 +0,0 @@
|
|||
{{- if .Values.dfdaemon.enable }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.dfdaemon.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.dfdaemon.name }}
|
||||
data:
|
||||
dfget.yaml: |-
|
||||
aliveTime: {{ .Values.dfdaemon.config.aliveTime }}
|
||||
gcInterval: {{ .Values.dfdaemon.config.gcInterval }}
|
||||
keepStorage: {{ .Values.dfdaemon.config.keepStorage }}
|
||||
workHome: {{ .Values.dfdaemon.config.workHome }}
|
||||
logDir: {{ .Values.dfdaemon.config.logDir }}
|
||||
cacheDir: {{ .Values.dfdaemon.config.cacheDir }}
|
||||
pluginDir: {{ .Values.dfdaemon.config.pluginDir }}
|
||||
dataDir: {{ .Values.dfdaemon.config.dataDir }}
|
||||
console: {{ .Values.dfdaemon.config.console }}
|
||||
health:
|
||||
{{ toYaml .Values.dfdaemon.config.health | indent 6 }}
|
||||
verbose: {{ .Values.dfdaemon.config.verbose }}
|
||||
{{- if .Values.dfdaemon.config.verbose }}
|
||||
pprof-port: {{ .Values.dfdaemon.config.pprofPort }}
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.metrics.enable }}
|
||||
metrics: ":8000"
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.config.jaeger }}
|
||||
jaeger: {{ .Values.dfdaemon.config.jaeger }}
|
||||
{{- else if .Values.jaeger.enable }}
|
||||
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
|
||||
{{- end }}
|
||||
scheduler:
|
||||
manager:
|
||||
enable: {{ .Values.dfdaemon.config.scheduler.manager.enable }}
|
||||
netAddrs:
|
||||
{{- if and (.Values.dfdaemon.config.scheduler.manager.enable) (.Values.dfdaemon.config.scheduler.manager.netAddrs) }}
|
||||
{{ toYaml .Values.dfdaemon.config.scheduler.manager.netAddrs | indent 10 }}
|
||||
{{- else if .Values.manager.enable }}
|
||||
- type: tcp
|
||||
addr: {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
|
||||
{{- else }}
|
||||
- type: tcp
|
||||
addr: {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
|
||||
{{- end }}
|
||||
refreshInterval: {{ .Values.dfdaemon.config.scheduler.manager.refreshInterval }}
|
||||
netAddrs:
|
||||
{{- if and (not .Values.dfdaemon.config.scheduler.manager.enable) (.Values.dfdaemon.config.scheduler.netAddrs) }}
|
||||
{{ toYaml .Values.dfdaemon.config.scheduler.netAddrs | indent 8 }}
|
||||
{{- end }}
|
||||
scheduleTimeout: {{ .Values.dfdaemon.config.scheduler.scheduleTimeout }}
|
||||
disableAutoBackSource: {{ .Values.dfdaemon.config.scheduler.disableAutoBackSource }}
|
||||
seedPeer:
|
||||
{{ toYaml .Values.dfdaemon.config.scheduler.manager.seedPeer | indent 8 }}
|
||||
host:
|
||||
{{ toYaml .Values.dfdaemon.config.host | indent 6 }}
|
||||
download:
|
||||
{{ toYaml .Values.dfdaemon.config.download | indent 6 }}
|
||||
upload:
|
||||
{{ toYaml .Values.dfdaemon.config.upload | indent 6 }}
|
||||
objectStorage:
|
||||
{{ toYaml .Values.dfdaemon.config.objectStorage | indent 6 }}
|
||||
storage:
|
||||
{{ toYaml .Values.dfdaemon.config.storage | indent 6 }}
|
||||
proxy:
|
||||
defaultFilter: {{ .Values.dfdaemon.config.proxy.defaultFilter }}
|
||||
defaultTag: {{ .Values.dfdaemon.config.proxy.defaultTag }}
|
||||
tcpListen:
|
||||
{{- if not .Values.dfdaemon.hostNetwork }}
|
||||
namespace: {{ .Values.dfdaemon.config.proxy.tcpListen.namespace }}
|
||||
{{- end }}
|
||||
port: {{ .Values.dfdaemon.containerPort }}
|
||||
security:
|
||||
{{ toYaml .Values.dfdaemon.config.proxy.security | indent 8 }}
|
||||
registryMirror:
|
||||
{{ toYaml .Values.dfdaemon.config.proxy.registryMirror | indent 8 }}
|
||||
proxies:
|
||||
{{ toYaml .Values.dfdaemon.config.proxy.proxies | indent 8 }}
|
||||
{{- if .Values.containerRuntime.docker.enable }}
|
||||
hijackHTTPS:
|
||||
cert: /etc/dragonfly-ca/cacert.pem
|
||||
key: /etc/dragonfly-ca/cakey.pem
|
||||
hosts:
|
||||
- regx: .*
|
||||
insecure: {{ .Values.containerRuntime.docker.insecure }}
|
||||
{{- if and .Values.containerRuntime.docker.injectHosts (not .Values.containerRuntime.docker.restart) }}
|
||||
sni:
|
||||
{{- range .Values.containerRuntime.docker.registryPorts }}
|
||||
- listen: 127.0.0.1
|
||||
port: {{ . }}
|
||||
{{- if not $.Values.dfdaemon.hostNetwork }}
|
||||
namespace: {{ $.Values.dfdaemon.config.proxy.tcpListen.namespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
security:
|
||||
{{ toYaml .Values.dfdaemon.config.security | indent 6 }}
|
||||
network:
|
||||
{{ toYaml .Values.dfdaemon.config.network | indent 6 }}
|
||||
announcer:
|
||||
{{ toYaml .Values.dfdaemon.config.announcer | indent 6 }}
|
||||
networkTopology:
|
||||
{{ toYaml .Values.dfdaemon.config.networkTopology | indent 6 }}
|
||||
{{- end }}
|
|
@ -1,605 +0,0 @@
|
|||
{{- if .Values.dfdaemon.enable }}
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: {{ template "dragonfly.dfdaemon.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.dfdaemon.name }}
|
||||
annotations:
|
||||
{{- if .Values.dfdaemon.daemonsetAnnotations }}
|
||||
{{ toYaml .Values.dfdaemon.daemonsetAnnotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.dfdaemon.updateStrategy }}
|
||||
updateStrategy:
|
||||
{{ toYaml .Values.dfdaemon.updateStrategy | indent 4 }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: "{{ .Values.dfdaemon.name }}"
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: "{{ .Values.dfdaemon.name }}"
|
||||
release: {{ .Release.Name }}
|
||||
{{- if .Values.dfdaemon.podLabels }}
|
||||
{{ toYaml .Values.dfdaemon.podLabels | indent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/dfdaemon/dfdaemon-configmap.yaml") . | sha256sum }}
|
||||
{{- if .Values.dfdaemon.podAnnotations }}
|
||||
{{ toYaml .Values.dfdaemon.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.dfdaemon.nodeSelector | default .Values.global.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
hostNetwork: {{ .Values.dfdaemon.hostNetwork }}
|
||||
{{- if .Values.dfdaemon.hostNetwork }}
|
||||
dnsPolicy: "ClusterFirstWithHostNet"
|
||||
{{- end }}
|
||||
hostPID: {{ or (and .Values.containerRuntime.docker.enable .Values.containerRuntime.docker.restart) .Values.containerRuntime.containerd.enable .Values.containerRuntime.crio.enable (gt (len .Values.containerRuntime.extraInitContainers) 0) }}
|
||||
{{- if .Values.dfdaemon.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.dfdaemon.tolerations | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.affinity }}
|
||||
affinity:
|
||||
{{ toYaml .Values.dfdaemon.affinity | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if quote .Values.dfdaemon.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.dfdaemon.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.dfdaemon.priorityClassName) }}
|
||||
priorityClassName: {{ .Values.dfdaemon.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.dfdaemon.image.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.hostAliases }}
|
||||
hostAliases:
|
||||
{{ toYaml .Values.dfdaemon.hostAliases | indent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: dfdaemon
|
||||
image: {{ template "dfdaemon.image" . }}
|
||||
imagePullPolicy: {{ .Values.dfdaemon.image.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
env:
|
||||
{{- if .Values.dfdaemon.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
value: {{ .Values.dfdaemon.maxProcs }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.dfdaemon.containerPort }}
|
||||
{{- if and (not .Values.dfdaemon.hostNetwork) (empty .Values.dfdaemon.config.proxy.tcpListen.namespace) }}
|
||||
hostPort: {{ .Values.dfdaemon.hostPort }}
|
||||
hostIP: 127.0.0.1
|
||||
{{- end }}
|
||||
protocol: TCP
|
||||
- containerPort: {{ .Values.dfdaemon.config.health.tcpListen.port }}
|
||||
hostPort: {{ .Values.dfdaemon.config.health.tcpListen.port }}
|
||||
hostIP: 127.0.0.1
|
||||
protocol: TCP
|
||||
{{- if .Values.dfdaemon.config.objectStorage.enable }}
|
||||
- containerPort: {{ .Values.dfdaemon.config.objectStorage.tcpListen.port }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.metrics.enable }}
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.dfdaemon.config.download.peerGRPC.tcpListen.port }}"]
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.dfdaemon.config.download.peerGRPC.tcpListen.port }}"]
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 10
|
||||
{{- if and .Values.containerRuntime.docker.enable (not .Values.containerRuntime.docker.restart) }}
|
||||
{{- if .Values.containerRuntime.docker.injectHosts }}
|
||||
lifecycle:
|
||||
postStart:
|
||||
exec:
|
||||
command:
|
||||
- "/bin/sh"
|
||||
- "-c"
|
||||
- |
|
||||
# inject hosts after dfdaemon started
|
||||
domains="{{- join " " .Values.containerRuntime.docker.registryDomains }}"
|
||||
# remove static dns entries in the pod's /etc/hosts that were injected by the host network
|
||||
echo "$(sed '/# Dragonfly SNI Host/d' /etc/hosts)" > /etc/hosts
|
||||
|
||||
if [[ -n "$domains" ]]; then
|
||||
for domain in $domains; do
|
||||
# inject static dns into /host/etc/hosts
|
||||
if grep "127.0.0.1 $domain" /host/etc/hosts; then
|
||||
echo "Dragonfly SNI Host $domain Found in /host/etc/hosts"
|
||||
continue
|
||||
else
|
||||
echo "Try to add dragonfly SNI host $domain"
|
||||
echo "127.0.0.1 $domain # Dragonfly SNI Host $domain" >> /host/etc/hosts
|
||||
echo "Dragonfly SNI host $domain added"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- "/bin/sh"
|
||||
- "-c"
|
||||
- |
|
||||
# when dfdaemon stops, clean up the injected host entries in the current node's /etc/hosts
|
||||
echo "$(sed '/# Dragonfly SNI Host/d' /host/etc/hosts)" > /host/etc/hosts
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: "/etc/dragonfly"
|
||||
{{- if and .Values.containerRuntime.docker.enable (not .Values.containerRuntime.docker.restart) }}
|
||||
{{- if and .Values.containerRuntime.docker.injectHosts }}
|
||||
- name: etc
|
||||
mountPath: /host/etc
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.containerRuntime.docker.enable }}
|
||||
- name: d7y-ca
|
||||
mountPath: /etc/dragonfly-ca
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.extraVolumeMounts }}
|
||||
{{- toYaml .Values.dfdaemon.extraVolumeMounts | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace }}
|
||||
- name: run
|
||||
mountPath: /run/dragonfly
|
||||
- name: data
|
||||
mountPath: {{ .Values.dfdaemon.config.dataDir }}
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- SYS_ADMIN
|
||||
{{- end }}
|
||||
{{- if or (and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace) .Values.containerRuntime.containerd.enable .Values.containerRuntime.docker.enable .Values.containerRuntime.crio.enable .Values.containerRuntime.extraInitContainers }}
|
||||
initContainers:
|
||||
{{- if .Values.scheduler.enable }}
|
||||
- name: wait-for-scheduler
|
||||
image: {{ template "dfdaemon.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.dfdaemon.initContainer.image.pullPolicy }}
|
||||
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.scheduler.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.scheduler.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.scheduler.config.server.port }}; do echo waiting for scheduler; sleep 2; done;']
|
||||
{{- end }}
|
||||
{{- if and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace }}
|
||||
- name: mount-netns
|
||||
image: {{ template "dfdaemon.image" . }}
|
||||
imagePullPolicy: {{ .Values.dfdaemon.image.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
# FIXME the dfget daemon only needs /proc/1/ns/net and CAP_SYS_ADMIN,
|
||||
# but containerd resolves the symlink at /proc/1/ns/net since v1.5.0.
|
||||
# because /proc/1/ns/net is not a regular symbolic link, that resolution always fails.
|
||||
# to keep only the CAP_SYS_ADMIN capability, use an init container to bind mount just the netns to /run/dragonfly/net.
|
||||
# https://github.com/containerd/containerd/blob/v1.5.0/pkg/cri/opts/spec_linux.go#L171.
|
||||
command:
|
||||
- /bin/sh
|
||||
- -cx
|
||||
- |-
|
||||
if [ ! -e "/run/dragonfly/net" ]; then
|
||||
touch /run/dragonfly/net
|
||||
fi
|
||||
i1=$(stat -L -c %i /host/ns/net)
|
||||
i2=$(stat -L -c %i /run/dragonfly/net)
|
||||
if [ "$i1" != "$i2" ]; then
|
||||
/bin/mount -o bind /host/ns/net /run/dragonfly/net
|
||||
fi
|
||||
volumeMounts:
|
||||
- name: hostns
|
||||
mountPath: /host/ns
|
||||
- name: run
|
||||
mountPath: /run/dragonfly
|
||||
# the bind mount needs Bidirectional propagation to reach the host.
|
||||
mountPropagation: Bidirectional
|
||||
securityContext:
|
||||
# opening /proc/1/ns needs privileged permission.
|
||||
privileged: true
|
||||
{{- end }}
|
||||
{{- if .Values.containerRuntime.extraInitContainers }}
|
||||
{{ toYaml .Values.containerRuntime.extraInitContainers | indent 6 }}
|
||||
{{- else if .Values.containerRuntime.docker.enable }}
|
||||
- name: update-docker-config
|
||||
image: {{ template "containerRuntime.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.dfdaemon.image.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -cx
|
||||
- |-
|
||||
mkdir -p /tmp/dragonfly-ca
|
||||
cd /tmp/dragonfly-ca
|
||||
|
||||
openssl genrsa -out cakey.pem 2048
|
||||
|
||||
cat << EOF > root.conf
|
||||
[ req ]
|
||||
default_bits = 2048
|
||||
default_keyfile = key.pem
|
||||
default_md = sha256
|
||||
distinguished_name = req_distinguished_name
|
||||
req_extensions = req_ext
|
||||
string_mask = nombstr
|
||||
x509_extensions = x509_ext
|
||||
[ req_distinguished_name ]
|
||||
countryName = Country Name (2 letter code)
|
||||
countryName_default = {{.Values.containerRuntime.docker.caCert.countryName}}
|
||||
stateOrProvinceName = State or Province Name (full name)
|
||||
stateOrProvinceName_default = {{.Values.containerRuntime.docker.caCert.stateOrProvinceName}}
|
||||
localityName = Locality Name (eg, city)
|
||||
localityName_default = {{.Values.containerRuntime.docker.caCert.localityName}}
|
||||
organizationName = Organization Name (eg, company)
|
||||
organizationName_default = {{.Values.containerRuntime.docker.caCert.organizationName}}
|
||||
commonName = Common Name (e.g. server FQDN or YOUR name)
|
||||
commonName_max = 64
|
||||
commonName_default = {{.Values.containerRuntime.docker.caCert.commonName}}
|
||||
[ x509_ext ]
|
||||
authorityKeyIdentifier = keyid,issuer
|
||||
basicConstraints = CA:TRUE
|
||||
keyUsage = digitalSignature, keyEncipherment, keyCertSign, cRLSign
|
||||
subjectKeyIdentifier = hash
|
||||
[ req_ext ]
|
||||
basicConstraints = CA:TRUE
|
||||
keyUsage = digitalSignature, keyEncipherment, keyCertSign, cRLSign
|
||||
subjectKeyIdentifier = hash
|
||||
EOF
|
||||
|
||||
openssl req -batch -new -x509 -key ./cakey.pem -out ./cacert.pem -days 65536 -config ./root.conf
|
||||
openssl x509 -inform PEM -in ./cacert.pem -outform DER -out ./CA.cer
|
||||
|
||||
openssl x509 -in ./cacert.pem -noout -text
|
||||
# update ca for golang program(docker in host), refer: https://github.com/golang/go/blob/go1.17/src/crypto/x509/root_linux.go#L8
|
||||
ca_list="/etc/ssl/certs/ca-certificates.crt /etc/pki/tls/certs/ca-bundle.crt /etc/ssl/ca-bundle.pem /etc/pki/tls/cacert.pem /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /etc/ssl/cert.pem"
|
||||
for ca in $ca_list; do
|
||||
ca="/host$ca"
|
||||
if [[ -e "$ca" ]]; then
|
||||
echo "CA $ca" found
|
||||
if grep "Dragonfly Authority CA" "$ca"; then
|
||||
echo "Dragonfly Authority ca found"
|
||||
if [[ -e /host/etc/dragonfly-ca/cakey.pem && -e /host/etc/dragonfly-ca/cacert.pem ]]; then
|
||||
echo "CA cert and key ready"
|
||||
break
|
||||
else
|
||||
echo "Warning: CA cert and key not ready"
|
||||
fi
|
||||
fi
|
||||
echo "Try to add Dragonfly CA"
|
||||
echo "# Dragonfly Authority CA" > cacert.toadd.pem
|
||||
cat cacert.pem >> cacert.toadd.pem
|
||||
cat cacert.toadd.pem >> "$ca"
|
||||
echo "Dragonfly CA added"
|
||||
cp -f ./cakey.pem ./cacert.pem /host/etc/dragonfly-ca/
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
{{- if not .Values.containerRuntime.docker.restart }}
|
||||
domains="{{- join " " .Values.containerRuntime.docker.registryDomains }}"
|
||||
if [[ -n "$domains" ]]; then
|
||||
for domain in $domains; do
|
||||
# inject docker cert by registry domain
|
||||
dir=/host/etc/docker/certs.d/$domain
|
||||
mkdir -p "$dir"
|
||||
echo copy CA cert to $dir
|
||||
cp -f /host/etc/dragonfly-ca/cacert.pem "$dir/ca.crt"
|
||||
done
|
||||
fi
|
||||
{{- end }}
|
||||
{{- if .Values.containerRuntime.docker.restart }}
|
||||
# inject docker proxy setting and restart docker
|
||||
# currently, without host PID in the container, we cannot nsenter by PID or invoke systemctl correctly.
|
||||
status=$(nsenter -t 1 -m -- systemctl status docker --no-pager | grep http-proxy.conf)
|
||||
if [[ -n "$status" ]]; then
|
||||
echo Docker proxy already enabled, skip
|
||||
else
|
||||
echo Try to inject proxy and restart docker
|
||||
path=$(nsenter -t 1 -m -- systemctl show -p FragmentPath docker.service | grep -o "/.*systemd.*")
|
||||
if [[ -z "$path" ]]; then
|
||||
echo docker.service not found
|
||||
exit 1
|
||||
fi
|
||||
nsenter -t 1 -m -- mkdir -p "$path".d
|
||||
nsenter -t 1 -m -- sh -c "echo '[Service]' > $path.d/http-proxy.conf"
|
||||
nsenter -t 1 -m -- sh -c "echo 'Environment=\"HTTP_PROXY=http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}\"' >> $path.d/http-proxy.conf"
|
||||
nsenter -t 1 -m -- sh -c "echo 'Environment=\"HTTPS_PROXY=http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}\"' >> $path.d/http-proxy.conf"
|
||||
nsenter -t 1 -m -- sh -c "echo 'Environment=\"NO_PROXY={{ join "," .Values.containerRuntime.docker.skipHosts }}\"' >> $path.d/http-proxy.conf"
|
||||
nsenter -t 1 -m -- systemctl daemon-reload
|
||||
nsenter -t 1 -m -- systemctl restart docker.service
|
||||
fi
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: etc
|
||||
mountPath: /host/etc
|
||||
{{- if .Values.containerRuntime.docker.restart }}
|
||||
securityContext:
|
||||
# nsenter needs privileged permission.
|
||||
privileged: true
|
||||
{{- end }}
|
||||
{{- else if .Values.containerRuntime.containerd.enable }}
|
||||
- name: update-containerd
|
||||
image: {{ template "containerRuntime.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.dfdaemon.image.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -cx
|
||||
- |-
|
||||
etcContainerd=/host{{ .Values.containerRuntime.containerd.configPathDir }}
|
||||
if [[ -e $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }} ]]; then
|
||||
echo containerd config found
|
||||
else
|
||||
echo $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }} not found
|
||||
exit 1
|
||||
fi
|
||||
cat $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
|
||||
registries="{{- join " " .Values.containerRuntime.containerd.registries }}"
|
||||
if [[ -n "$domains" ]]; then
|
||||
echo empty registry domains
|
||||
exit 1
|
||||
fi
|
||||
# detect containerd config version
|
||||
need_restart=0
|
||||
if grep "version[^=]*=[^2]*2" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
# inject v2 mirror setting
|
||||
|
||||
# get config_path if set
|
||||
{{- if .Values.containerRuntime.containerd.injectConfigPath }}
|
||||
config_path=$etcContainerd/certs.d
|
||||
{{- else }}
|
||||
config_path=$(cat $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }} | tr '"' ' ' | grep config_path | awk '{print $3}')
|
||||
{{- end }}
|
||||
if [[ -z "$config_path" ]]; then
|
||||
echo config_path is not enabled, just add one mirror in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
# parse registry domain
|
||||
registry={{ .Values.dfdaemon.config.proxy.registryMirror.url}}
|
||||
domain=$(echo $registry | sed -e "s,http.*://,," | sed "s,:.*,,")
|
||||
# inject registry
|
||||
if grep "registry.mirrors.\"$domain\"" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
# TODO merge mirrors
|
||||
echo "registry $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."$domain"]
|
||||
endpoint = ["http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}","$registry"]
|
||||
EOF
|
||||
echo "Registry $domain added"
|
||||
need_restart=1
|
||||
fi
|
||||
{{- if .Values.containerRuntime.containerd.injectRegistryCredencials.enable}}
|
||||
if grep "registry.configs.\"$domain\".auth" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
echo "registry auth $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.configs."$domain".auth]
|
||||
username = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.username }}"
|
||||
password = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.password }}"
|
||||
auth = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.auth }}"
|
||||
identitytoken = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.identitytoken }}"
|
||||
EOF
|
||||
echo "Registry auth $domain added"
|
||||
need_restart=1
|
||||
fi
|
||||
{{- end }}
|
||||
else
|
||||
echo config_path is enabled, add mirror in $config_path
|
||||
# TODO check whether config_path is enabled, if not, add it
|
||||
tmp=$(cat $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }} | tr '"' ' ' | grep config_path | awk '{print $3}')
|
||||
if [[ -z "$tmp" ]]; then
|
||||
echo inject config_path into $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
if grep -q '\[plugins."io.containerd.grpc.v1.cri".registry\]' $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
sed -i 's|\[plugins."io.containerd.grpc.v1.cri".registry\]|\[plugins."io.containerd.grpc.v1.cri".registry\]\nconfig_path = "{{ .Values.containerRuntime.containerd.configPathDir }}/certs.d"|g' $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||
config_path = "{{ .Values.containerRuntime.containerd.configPathDir }}/certs.d"
|
||||
EOF
|
||||
fi
|
||||
echo "Registry config_path $config_path added"
|
||||
need_restart=1
|
||||
fi
|
||||
mkdir -p $etcContainerd/certs.d
|
||||
for registry in $registries; do
|
||||
# If the registry is docker.io, then the domain name should
|
||||
# be changed to index.docker.io.
|
||||
if [ $registry == "https://docker.io" ]; then
|
||||
registry_domain=https://index.docker.io
|
||||
elif [ $registry == "http://docker.io" ]; then
|
||||
registry_domain=http://index.docker.io
|
||||
else
|
||||
registry_domain=$registry
|
||||
fi
|
||||
# parse registry domain
|
||||
domain=$(echo $registry | sed -e "s,http.*://,,")
|
||||
# inject registry
|
||||
mkdir -p $etcContainerd/certs.d/$domain
|
||||
if [[ -e "$etcContainerd/certs.d/$domain/hosts.toml" ]]; then
|
||||
echo "registry $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
continue
|
||||
else
|
||||
cat << EOF >> $etcContainerd/certs.d/$domain/hosts.toml
|
||||
server = "$registry_domain"
|
||||
[host."http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}"]
|
||||
capabilities = ["pull", "resolve"]
|
||||
[host."http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}".header]
|
||||
X-Dragonfly-Registry = ["$registry_domain"]
|
||||
[host."$registry_domain"]
|
||||
capabilities = ["pull", "resolve"]
|
||||
EOF
|
||||
echo "Registry $domain added"
|
||||
{{- if not .Values.containerRuntime.containerd.injectConfigPath }}
|
||||
need_restart=1
|
||||
{{- end }}
|
||||
fi
|
||||
done
|
||||
fi
|
||||
else
|
||||
# inject legacy v1 mirror setting
|
||||
echo containerd config is version 1, only one mirror is supported in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
# parse registry domain
|
||||
registry={{ .Values.dfdaemon.config.proxy.registryMirror.url}}
|
||||
domain=$(echo {{ .Values.dfdaemon.config.proxy.registryMirror.url}} | sed -e "s,http.*://,," | sed "s,:.*,,")
|
||||
# inject registry
|
||||
if grep "registry.mirrors.\"$domain\"" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
# TODO merge mirrors
|
||||
echo "registry $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins.cri.registry.mirrors."$domain"]
|
||||
endpoint = ["http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}","$registry"]
|
||||
EOF
|
||||
echo "Registry $domain added"
|
||||
need_restart=1
|
||||
fi
|
||||
{{- if .Values.containerRuntime.containerd.injectRegistryCredencials.enable}}
|
||||
if grep "registry.configs.\"$domain\".auth" $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}; then
|
||||
echo "registry auth $registry found in {{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}, skip"
|
||||
else
|
||||
cat << EOF >> $etcContainerd/{{ default "config.toml" .Values.containerRuntime.containerd.configFileName }}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.configs."$domain".auth]
|
||||
username = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.username }}"
|
||||
password = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.password }}"
|
||||
auth = "{{ .Values.containerRuntime.containerd.injectRegistryCredencials.auth }}"
|
||||
EOF
|
||||
echo "Registry auth $domain added"
|
||||
need_restart=1
|
||||
fi
|
||||
{{- end }}
|
||||
fi
|
||||
# restart containerd
|
||||
# currently, without host PID in the container, we cannot nsenter by PID or invoke systemctl correctly.
|
||||
if [[ "$need_restart" -gt 0 ]]; then
|
||||
nsenter -t 1 -m -- systemctl restart containerd.service
|
||||
fi
|
||||
volumeMounts:
|
||||
- name: containerd-conf
|
||||
mountPath: /host{{ .Values.containerRuntime.containerd.configPathDir }}
|
||||
securityContext:
|
||||
# nsenter needs privileged permission.
|
||||
privileged: true
|
||||
{{- else if .Values.containerRuntime.crio.enable }}
|
||||
- name: update-crio
|
||||
image: {{ template "containerRuntime.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.dfdaemon.image.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -cx
|
||||
- |-
|
||||
registries="{{- join " " .Values.containerRuntime.crio.registries }}"
|
||||
if [[ -n "$domains" ]]; then
|
||||
echo Empty registry domains
|
||||
exit 1
|
||||
fi
|
||||
|
||||
confd="/host/etc/containers/registries.conf.d"
|
||||
if [[ ! -e "$confd" ]]; then
|
||||
mkdir -p "$confd"
|
||||
fi
|
||||
|
||||
for registry in $registries; do
|
||||
# parse registry domain
|
||||
domain=$(echo $registry | sed "s,http.://,," | sed "s,:.*,,")
|
||||
schema=$(echo $registry | sed "s,://.*,,")
|
||||
port=$(echo $registry | sed "s,http.://,," | sed "s,[^:]*,," | sed "s,:,,")
|
||||
insecure=false
|
||||
if [[ "$schema" == "http" ]]; then
|
||||
insecure=true
|
||||
fi
|
||||
if [[ -z "$port" ]]; then
|
||||
if [[ "$schema" == "https" ]]; then
|
||||
port=443
|
||||
elif [[ "$schema" == "http" ]]; then
|
||||
port=80
|
||||
fi
|
||||
fi
|
||||
echo schema: $schema, domain: $domain, port: $port
|
||||
# inject registry
|
||||
if [[ -e "$confd/$domain.conf" ]]; then
|
||||
echo "registry $registry found in $confd, skip"
|
||||
continue
|
||||
else
|
||||
cat << EOF > "$confd/$domain.conf"
|
||||
[[registry]]
|
||||
prefix = "$domain"
|
||||
location = "$domain:$port"
|
||||
insecure = $insecure
|
||||
[[registry.mirror]]
|
||||
location = "127.0.0.1:{{ .Values.dfdaemon.hostPort}}"
|
||||
insecure = true
|
||||
EOF
|
||||
echo "Registry $domain added"
|
||||
fi
|
||||
done
|
||||
nsenter -t 1 -m -- systemctl reload crio.service
|
||||
volumeMounts:
|
||||
- name: etc
|
||||
mountPath: /host/etc
|
||||
securityContext:
|
||||
# nsenter needs privileged permission.
|
||||
privileged: true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ template "dragonfly.dfdaemon.fullname" . }}
|
||||
{{- if and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace }}
|
||||
- name: hostns
|
||||
hostPath:
|
||||
path: /proc/1/ns
|
||||
- name: run
|
||||
hostPath:
|
||||
path: /run/dragonfly
|
||||
type: DirectoryOrCreate
|
||||
{{- end }}
|
||||
{{- if .Values.containerRuntime.docker.enable }}
|
||||
- name: etc
|
||||
hostPath:
|
||||
path: /etc
|
||||
- name: d7y-ca
|
||||
hostPath:
|
||||
path: /etc/dragonfly-ca
|
||||
type: DirectoryOrCreate
|
||||
{{- else if .Values.containerRuntime.containerd.enable }}
|
||||
- name: containerd-conf
|
||||
hostPath:
|
||||
path: {{ .Values.containerRuntime.containerd.configPathDir }}
|
||||
{{- else if .Values.containerRuntime.crio.enable }}
|
||||
- name: etc
|
||||
hostPath:
|
||||
path: /etc
|
||||
{{- end }}
|
||||
- name: data
|
||||
{{- if .Values.dfdaemon.mountDataDirAsHostPath }}
|
||||
hostPath:
|
||||
path: {{ .Values.dfdaemon.config.dataDir }}
|
||||
type: DirectoryOrCreate
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.extraVolumes }}
|
||||
{{- toYaml .Values.dfdaemon.extraVolumes | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -1,33 +0,0 @@
|
|||
{{- if and .Values.dfdaemon.metrics.enable .Values.dfdaemon.metrics.podMonitor.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PodMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.dfdaemon.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.dfdaemon.name }}
|
||||
{{- if .Values.dfdaemon.metrics.podMonitor.additionalLabels }}
|
||||
{{ toYaml .Values.dfdaemon.metrics.podMonitor.additionalLabels | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
podMetricsEndpoints:
|
||||
- port: "8000"
|
||||
path: /metrics
|
||||
{{- if .Values.dfdaemon.metrics.podMonitor.interval }}
|
||||
interval: {{ .Values.dfdaemon.metrics.podMonitor.interval }}
|
||||
{{- end }}
|
||||
{{- if .Values.dfdaemon.metrics.podMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ .Values.dfdaemon.metrics.podMonitor.scrapeTimeout }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.dfdaemon.name }}
|
||||
{{- end }}
|
|
@ -1,20 +0,0 @@
|
|||
{{- if and .Values.dfdaemon.metrics.enable .Values.dfdaemon.metrics.prometheusRule.enable }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.dfdaemon.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.dfdaemon.name }}
|
||||
{{- if .Values.dfdaemon.metrics.prometheusRule.additionalLabels }}
|
||||
{{ toYaml .Values.dfdaemon.metrics.prometheusRule.additionalLabels | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
groups:
|
||||
- name: {{ template "dragonfly.dfdaemon.fullname" $ }}
|
||||
rules:
|
||||
{{ toYaml .Values.dfdaemon.metrics.prometheusRule.rules | indent 8 }}
|
||||
{{- end }}
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
@ -25,6 +25,7 @@ data:
|
|||
start: {{ .Values.manager.grpcPort }}
|
||||
end: {{ .Values.manager.grpcPort }}
|
||||
workHome: {{ .Values.manager.config.server.workHome }}
|
||||
logLevel: {{ .Values.manager.config.server.logLevel }}
|
||||
logDir: {{ .Values.manager.config.server.logDir }}
|
||||
cacheDir: {{ .Values.manager.config.server.cacheDir }}
|
||||
pluginDir: {{ .Values.manager.config.server.pluginDir }}
|
||||
|
@ -58,6 +59,8 @@ data:
|
|||
masterName: {{ .Values.externalRedis.masterName }}
|
||||
username: {{ .Values.externalRedis.username }}
|
||||
password: {{ .Values.externalRedis.password }}
|
||||
sentinelUsername: {{ .Values.externalRedis.sentinelUsername }}
|
||||
sentinelPassword: {{ .Values.externalRedis.sentinelPassword }}
|
||||
db: {{ .Values.externalRedis.db }}
|
||||
brokerDB: {{ .Values.externalRedis.brokerDB }}
|
||||
backendDB: {{ .Values.externalRedis.backendDB }}
|
||||
|
@ -66,23 +69,13 @@ data:
|
|||
{{ toYaml .Values.manager.config.cache | indent 6 }}
|
||||
job:
|
||||
{{ toYaml .Values.manager.config.job | indent 6 }}
|
||||
objectStorage:
|
||||
{{ toYaml .Values.manager.config.objectStorage | indent 6 }}
|
||||
security:
|
||||
{{ toYaml .Values.manager.config.security | indent 6 }}
|
||||
network:
|
||||
{{ toYaml .Values.manager.config.network | indent 6 }}
|
||||
metrics:
|
||||
enable: {{ .Values.manager.metrics.enable }}
|
||||
addr: ":8000"
|
||||
console: {{ .Values.manager.config.console }}
|
||||
verbose: {{ .Values.manager.config.verbose }}
|
||||
{{- if .Values.manager.config.verbose }}
|
||||
pprof-port: {{ .Values.manager.config.pprofPort }}
|
||||
{{- end }}
|
||||
{{- if .Values.manager.config.jaeger }}
|
||||
jaeger: {{ .Values.manager.config.jaeger }}
|
||||
{{- else if .Values.jaeger.enable }}
|
||||
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
|
||||
{{- end }}
|
||||
pprofPort: {{ .Values.manager.config.pprofPort }}
|
||||
tracing:
|
||||
{{ toYaml .Values.manager.config.tracing | indent 6 }}
|
||||
{{- end }}
|
||||
|
|
|
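The manager config above now also renders sentinelUsername and sentinelPassword when an external Redis is used. An illustrative values sketch (every value below is a placeholder, not a default):

    externalRedis:
      masterName: mymaster
      username: ""
      password: changeme
      sentinelUsername: ""
      sentinelPassword: ""
      db: 0
      brokerDB: 1
      backendDB: 2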
@ -4,11 +4,11 @@ kind: Deployment
|
|||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
annotations:
|
||||
{{- if .Values.manager.deploymentAnnotations }}
|
||||
{{ toYaml .Values.manager.deploymentAnnotations | indent 4 }}
|
||||
|
@ -22,13 +22,11 @@ spec:
|
|||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.manager.name }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.manager.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- if .Values.manager.podLabels }}
|
||||
{{ toYaml .Values.manager.podLabels | indent 8 }}
|
||||
{{- end }}
|
||||
|
@ -38,6 +36,10 @@ spec:
|
|||
{{ toYaml .Values.manager.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
hostNetwork: {{ .Values.manager.hostNetwork }}
|
||||
{{- if .Values.manager.hostNetwork }}
|
||||
dnsPolicy: "ClusterFirstWithHostNet"
|
||||
{{- end }}
|
||||
{{- with .Values.manager.nodeSelector | default .Values.global.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
|
@ -53,7 +55,7 @@ spec:
|
|||
{{- if quote .Values.manager.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.manager.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.scheduler.priorityClassName) }}
|
||||
{{- if .Values.manager.priorityClassName }}
|
||||
priorityClassName: {{ .Values.manager.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.manager.image.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
|
@ -71,12 +73,16 @@ spec:
|
|||
image: {{ template "manager.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.manager.initContainer.image.pullPolicy }}
|
||||
command: ['sh', '-c', 'until nslookup {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} && nc -vz {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} {{ .Values.redis.master.service.ports.redis }}; do echo waiting for redis; sleep 2; done;']
|
||||
resources:
|
||||
{{ toYaml .Values.manager.initContainer.resources | indent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.mysql.enable }}
|
||||
- name: wait-for-mysql
|
||||
image: {{ template "manager.initContainer.image" . }}
|
||||
imagePullPolicy: {{ .Values.manager.initContainer.image.pullPolicy }}
|
||||
command: ['sh', '-c', 'until nslookup {{ .Release.Name }}-{{ default "mysql" .Values.mysql.fullname }}.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} && nc -vz {{ .Release.Name }}-{{ default "mysql" .Values.mysql.fullname }}.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} {{ .Values.mysql.primary.service.port }}; do echo waiting for mysql; sleep 2; done;']
|
||||
resources:
|
||||
{{ toYaml .Values.manager.initContainer.resources | indent 10 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
containers:
|
||||
|
@ -84,7 +90,7 @@ spec:
|
|||
image: {{ template "manager.image" . }}
|
||||
imagePullPolicy: {{ .Values.manager.image.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.manager.resources | indent 12 }}
|
||||
{{ toYaml .Values.manager.resources | indent 10 }}
|
||||
env:
|
||||
{{- if .Values.manager.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
|
|
|
@ -9,9 +9,9 @@ apiVersion: extensions/v1beta1
|
|||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
@ -21,14 +21,18 @@ spec:
|
|||
ports:
|
||||
- port: {{ .Values.manager.restPort }}
|
||||
name: http-rest
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.manager.restPort }}
|
||||
- port: {{ .Values.manager.grpcPort }}
|
||||
name: http-grpc
|
||||
name: grpc
|
||||
appProtocol: grpc
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.manager.grpcPort }}
|
||||
{{- if eq .Values.manager.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.manager.service.nodePort }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.manager.name }}
|
||||
{{- end }}
|
||||
|
|
|
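Above, the manager's gRPC port is renamed from http-grpc to grpc and annotated with appProtocol: grpc, which lets proxies and service meshes treat the traffic as gRPC instead of plain HTTP. Rendered with hypothetical port numbers, the Service ports would look roughly like:

    ports:
      - port: 8080              # manager.restPort (illustrative)
        name: http-rest
        appProtocol: http
        protocol: TCP
        targetPort: 8080
      - port: 65003             # manager.grpcPort (illustrative)
        name: grpc
        appProtocol: grpc
        protocol: TCP
        targetPort: 65003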
@ -3,14 +3,14 @@ apiVersion: v1
|
|||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}-metrics
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}-metrics
|
||||
{{- if .Values.manager.metrics.service.labels }}
|
||||
{{ toYaml .Values.metrics.service.labels | indent 4 }}
|
||||
{{ toYaml .Values.manager.metrics.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.manager.metrics.service.annotations }}
|
||||
annotations:
|
||||
|
@ -21,10 +21,10 @@ spec:
|
|||
ports:
|
||||
- port: 8000
|
||||
name: http-metrics
|
||||
targetPort: 8000
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: 8000
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.manager.name }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.manager.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.manager.name }}
|
||||
|
@ -27,6 +27,5 @@ spec:
|
|||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.manager.name }}-metrics
|
||||
{{- end }}
|
||||
|
|
|
@ -3,14 +3,14 @@ apiVersion: v1
|
|||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}-metrics
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}-metrics
|
||||
{{- if .Values.scheduler.metrics.service.labels }}
|
||||
{{ toYaml .Values.metrics.service.labels | indent 4 }}
|
||||
{{ toYaml .Values.scheduler.metrics.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.scheduler.metrics.service.annotations }}
|
||||
annotations:
|
||||
|
@ -21,10 +21,10 @@ spec:
|
|||
ports:
|
||||
- port: 8000
|
||||
name: http-metrics
|
||||
targetPort: 8000
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: 8000
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
@ -27,12 +27,11 @@ data:
|
|||
masterName: {{ .Values.externalRedis.masterName }}
|
||||
username: {{ .Values.externalRedis.username }}
|
||||
password: {{ .Values.externalRedis.password }}
|
||||
sentinelUsername: {{ .Values.externalRedis.sentinelUsername }}
|
||||
sentinelPassword: {{ .Values.externalRedis.sentinelPassword }}
|
||||
brokerDB: {{ .Values.externalRedis.brokerDB }}
|
||||
backendDB: {{ .Values.externalRedis.backendDB }}
|
||||
networkTopologyDB: {{ .Values.externalRedis.networkTopologyDB }}
|
||||
{{- end }}
|
||||
resource:
|
||||
{{ toYaml .Values.scheduler.config.resource | indent 6 }}
|
||||
dynconfig:
|
||||
{{ toYaml .Values.scheduler.config.dynconfig | indent 6 }}
|
||||
host:
|
||||
|
@ -60,13 +59,13 @@ data:
|
|||
masterName: {{ .Values.externalRedis.masterName }}
|
||||
username: {{ .Values.externalRedis.username }}
|
||||
password: {{ .Values.externalRedis.password }}
|
||||
sentinelUsername: {{ .Values.externalRedis.sentinelUsername }}
|
||||
sentinelPassword: {{ .Values.externalRedis.sentinelPassword }}
|
||||
brokerDB: {{ .Values.externalRedis.brokerDB }}
|
||||
backendDB: {{ .Values.externalRedis.backendDB }}
|
||||
{{- end }}
|
||||
storage:
|
||||
{{ toYaml .Values.scheduler.config.storage | indent 6 }}
|
||||
security:
|
||||
{{ toYaml .Values.scheduler.config.security | indent 6 }}
|
||||
network:
|
||||
{{ toYaml .Values.scheduler.config.network | indent 6 }}
|
||||
metrics:
|
||||
|
@ -74,13 +73,7 @@ data:
|
|||
addr: ":8000"
|
||||
enableHost: {{ .Values.scheduler.metrics.enableHost }}
|
||||
console: {{ .Values.scheduler.config.console }}
|
||||
verbose: {{ .Values.scheduler.config.verbose }}
|
||||
{{- if .Values.scheduler.config.verbose }}
|
||||
pprof-port: {{ .Values.scheduler.config.pprofPort }}
|
||||
{{- end }}
|
||||
{{- if .Values.scheduler.config.jaeger }}
|
||||
jaeger: {{ .Values.scheduler.config.jaeger }}
|
||||
{{- else if .Values.jaeger.enable }}
|
||||
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
|
||||
{{- end }}
|
||||
pprofPort: {{ .Values.scheduler.config.pprofPort }}
|
||||
tracing:
|
||||
{{ toYaml .Values.scheduler.config.tracing | indent 6 }}
|
||||
{{- end }}
|
||||
|
|
|
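In the scheduler config above, the old console/verbose/pprof-port/jaeger keys give way to a pprofPort field and a tracing block passed through with toYaml. A hedged sketch of the new values shape (values are placeholders and the disable semantics are an assumption):

    scheduler:
      config:
        pprofPort: -1            # assumed to disable pprof when negative
        tracing: {}              # rendered verbatim into the scheduler config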
@ -3,9 +3,9 @@ apiVersion: apps/v1
|
|||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
@ -22,14 +22,12 @@ spec:
|
|||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
serviceName: scheduler
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
{{- if .Values.scheduler.podLabels }}
|
||||
{{ toYaml .Values.scheduler.podLabels | indent 8 }}
|
||||
|
@ -40,6 +38,10 @@ spec:
|
|||
{{ toYaml .Values.scheduler.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
hostNetwork: {{ .Values.scheduler.hostNetwork }}
|
||||
{{- if .Values.scheduler.hostNetwork }}
|
||||
dnsPolicy: "ClusterFirstWithHostNet"
|
||||
{{- end }}
|
||||
{{- with .Values.scheduler.nodeSelector | default .Values.global.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
|
@ -55,7 +57,7 @@ spec:
|
|||
{{- if quote .Values.scheduler.terminationGracePeriodSeconds }}
|
||||
terminationGracePeriodSeconds: {{ .Values.scheduler.terminationGracePeriodSeconds }}
|
||||
{{- end }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.scheduler.priorityClassName) }}
|
||||
{{- if .Values.scheduler.priorityClassName }}
|
||||
priorityClassName: {{ .Values.scheduler.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- with .Values.scheduler.image.pullSecrets | default .Values.global.imagePullSecrets }}
|
||||
|
@ -75,12 +77,14 @@ spec:
|
|||
{{- else }}
|
||||
command: ['sh', '-c', 'until nslookup {{ .Values.externalManager.host }} && nc -vz {{ .Values.externalManager.host }} {{ .Values.externalManager.restPort }}; do echo waiting for external manager; sleep 2; done;']
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.scheduler.initContainer.resources | indent 10 }}
|
||||
containers:
|
||||
- name: scheduler
|
||||
image: {{ template "scheduler.image" . }}
|
||||
imagePullPolicy: {{ .Values.scheduler.image.pullPolicy | quote }}
|
||||
resources:
|
||||
{{ toYaml .Values.scheduler.resources | indent 12 }}
|
||||
{{ toYaml .Values.scheduler.resources | indent 10 }}
|
||||
env:
|
||||
{{- if .Values.scheduler.maxProcs }}
|
||||
- name: GOMAXPROCS
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
|
|||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
@ -20,11 +20,14 @@ spec:
|
|||
type: {{ .Values.scheduler.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.scheduler.config.server.port }}
|
||||
name: http-grpc
|
||||
name: grpc
|
||||
appProtocol: grpc
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.scheduler.config.server.port }}
|
||||
{{- if eq .Values.scheduler.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.scheduler.service.nodePort }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "dragonfly.scheduler.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.scheduler.name }}
|
||||
|
@ -27,6 +27,5 @@ spec:
|
|||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: {{ .Values.scheduler.name }}-metrics
|
||||
{{- end }}
|
||||
|
|
|
@ -1,16 +1,16 @@
|
|||
{{- if and .Values.seedClient.metrics.enable .Values.seedClient.metrics.serviceMonitor.enable }}
|
||||
{{- if .Values.seedClient.metrics.enable }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedClient.fullname" . }}-metrics
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedClient.name }}-metrics
|
||||
{{- if .Values.seedClient.metrics.service.labels }}
|
||||
{{ toYaml .Values.metrics.service.labels | indent 4 }}
|
||||
{{ toYaml .Values.seedClient.metrics.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.seedClient.metrics.service.annotations }}
|
||||
annotations:
|
||||
|
@ -21,10 +21,10 @@ spec:
|
|||
ports:
|
||||
- port: {{ .Values.seedClient.config.metrics.server.port }}
|
||||
name: http-metrics
|
||||
targetPort: {{ .Values.seedClient.config.metrics.server.port }}
|
||||
appProtocol: http
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.seedClient.config.metrics.server.port }}
|
||||
selector:
|
||||
app: {{ template "dragonfly.fullname" . }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
|
|||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ template "dragonfly.seedClient.fullname" . }}
|
||||
namespace: {{ include "common.names.namespace" . | quote }}
|
||||
labels:
|
||||
app: {{ template "dragonfly.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: {{ .Values.seedClient.name }}
|
||||
|
|
|
@ -3,9 +3,9 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.seedClient.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedClient.name }}

@ -20,13 +20,12 @@ data:
upload:
{{ toYaml .Values.seedClient.config.upload | indent 6 }}
manager:
addrs:
{{- if .Values.seedClient.config.manager.addrs }}
{{ toYaml .Values.seedClient.config.manager.addrs | indent 6 }}
addr: {{ .Values.seedClient.config.manager.addr }}
{{- else if .Values.manager.enable }}
- http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
addr: http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
{{- else }}
- {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
addr: http://{{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
{{- end }}
scheduler:
{{ toYaml .Values.seedClient.config.scheduler | indent 6 }}

@ -40,18 +39,12 @@ data:
{{ toYaml .Values.seedClient.config.gc | indent 6 }}
proxy:
{{ toYaml .Values.seedClient.config.proxy | indent 6 }}
security:
{{ toYaml .Values.seedClient.config.security | indent 6 }}
health:
{{ toYaml .Values.seedClient.config.health | indent 6 }}
metrics:
{{ toYaml .Values.seedClient.config.metrics | indent 6 }}
stats:
{{ toYaml .Values.seedClient.config.stats | indent 6 }}
{{- if .Values.seedClient.config.tracing }}
tracing:
{{ toYaml .Values.seedClient.config.tracing | indent 6 }}
{{- else if .Values.jaeger.enable }}
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
{{- end }}
{{- end }}

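The manager section now renders a single addr string rather than an addrs list. A sketch of the resulting dfdaemon config fragment, assuming a release named dragonfly, the dragonfly-system namespace, the cluster.local domain and a manager gRPC port of 65003 (all of these are assumptions, not values taken from this diff):

manager:
  addr: http://dragonfly-manager.dragonfly-system.svc.cluster.local:65003  # illustrative rendering only
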
@ -4,11 +4,11 @@ kind: StatefulSet
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedClient.name }}
name: {{ template "dragonfly.seedClient.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
annotations:
{{- if .Values.seedClient.statefulsetAnnotations }}
{{ toYaml .Values.seedClient.statefulsetAnnotations | indent 4 }}

@ -23,14 +23,12 @@ spec:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.seedClient.name }}
release: {{ .Release.Name }}
serviceName: seed-client
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.seedClient.name }}
release: {{ .Release.Name }}
{{- if .Values.seedClient.podLabels }}
{{ toYaml .Values.seedClient.podLabels | indent 8 }}
{{- end }}

@ -40,6 +38,10 @@ spec:
{{ toYaml .Values.seedClient.podAnnotations | indent 8 }}
{{- end }}
spec:
hostNetwork: {{ .Values.seedClient.hostNetwork }}
{{- if .Values.seedClient.hostNetwork }}
dnsPolicy: "ClusterFirstWithHostNet"
{{- end }}
{{- with .Values.seedClient.nodeSelector | default .Values.global.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}

@ -55,7 +57,7 @@ spec:
{{- if quote .Values.seedClient.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.seedClient.terminationGracePeriodSeconds }}
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.seedClient.priorityClassName) }}
{{- if .Values.seedClient.priorityClassName }}
priorityClassName: {{ .Values.seedClient.priorityClassName }}
{{- end }}
{{- with .Values.seedClient.image.pullSecrets | default .Values.global.imagePullSecrets }}

@ -75,17 +77,19 @@ spec:
{{- else }}
command: ['sh', '-c', 'until nslookup {{ .Values.externalManager.host }} && nc -vz {{ .Values.externalManager.host }} {{ .Values.externalManager.restPort }}; do echo waiting for external manager; sleep 2; done;']
{{- end }}
resources:
{{ toYaml .Values.seedClient.initContainer.resources | indent 10 }}
containers:
- name: seed-client
image: {{ template "seedClient.image" . }}
imagePullPolicy: {{ .Values.seedClient.image.pullPolicy | quote }}
args:
- --log-level={{ .Values.client.config.log.level }}
{{- if .Values.seedClient.config.verbose }}
- --verbose
{{- if .Values.seedClient.config.console }}
- --console
{{- end }}
resources:
{{ toYaml .Values.seedClient.resources | indent 12 }}
{{ toYaml .Values.seedClient.resources | indent 10 }}
env:
{{- if .Values.seedClient.maxProcs }}
- name: GOMAXPROCS

@ -104,14 +108,16 @@ spec:
protocol: TCP
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.seedClient.config.upload.server.port }}"]
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.seedClient.config.download.server.socketPath }}"]
initialDelaySeconds: 5
periodSeconds: 10
periodSeconds: 30
timeoutSeconds: 5
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.seedClient.config.upload.server.port }}"]
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.seedClient.config.download.server.socketPath }}"]
initialDelaySeconds: 15
periodSeconds: 10
periodSeconds: 30
timeoutSeconds: 5
volumeMounts:
- name: config
mountPath: "/etc/dragonfly"

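The probes above switch from the upload server's TCP port to the download server's unix socket. A sketch of how the readiness probe would render, assuming a socket path of /var/run/dragonfly/dfdaemon.sock (an assumption; the real value is seedClient.config.download.server.socketPath):

readinessProbe:
  exec:
    # grpc_health_probe dials the dfdaemon download gRPC server over its unix socket
    command: ["/bin/grpc_health_probe", "-addr=unix:///var/run/dragonfly/dfdaemon.sock"]
  initialDelaySeconds: 5
  periodSeconds: 30
  timeoutSeconds: 5
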
@ -3,9 +3,9 @@ apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.seedClient.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedClient.name }}

@ -21,18 +21,23 @@ spec:
ports:
- port: {{ .Values.seedClient.config.proxy.server.port }}
name: http-proxy
appProtocol: http
protocol: TCP
targetPort: {{ .Values.seedClient.config.proxy.server.port }}
- port: {{ .Values.seedClient.config.health.server.port }}
name: http-health
appProtocol: http
protocol: TCP
targetPort: {{ .Values.seedClient.config.health.server.port }}
- port: {{ .Values.seedClient.config.stats.server.port }}
name: http-stats
appProtocol: http
protocol: TCP
targetPort: {{ .Values.seedClient.config.stats.server.port }}
{{- if eq .Values.seedClient.service.type "NodePort" }}
nodePort: {{ .Values.seedClient.service.nodePort }}
{{- end }}
selector:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.seedClient.name }}
{{- end }}

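These proxy and health ports are what the nydus-snapshotter defaults further down in this diff point at (mirror host http://127.0.0.1:4001, ping_url http://127.0.0.1:4003/healthy). A sketch of one rendered entry, assuming a proxy port of 4001 (an assumption; the real value is seedClient.config.proxy.server.port):

ports:
  - port: 4001            # assumed default for seedClient.config.proxy.server.port
    name: http-proxy
    appProtocol: http
    protocol: TCP
    targetPort: 4001
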
@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "dragonfly.seedClient.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedClient.name }}

@ -27,6 +27,5 @@ spec:
selector:
matchLabels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.seedClient.name }}-metrics
{{- end }}

@ -1,30 +0,0 @@
{{- if and .Values.seedPeer.metrics.enable .Values.seedPeer.metrics.serviceMonitor.enable }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.seedPeer.fullname" . }}-metrics
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedPeer.name }}-metrics
{{- if .Values.seedPeer.metrics.service.labels }}
{{ toYaml .Values.metrics.service.labels | indent 4 }}
{{- end }}
{{- if .Values.seedPeer.metrics.service.annotations }}
annotations:
{{ toYaml .Values.seedPeer.metrics.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.seedPeer.metrics.service.type }}
ports:
- port: 8000
name: http-metrics
targetPort: 8000
protocol: TCP
selector:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.seedPeer.name }}
release: {{ .Release.Name }}
{{- end }}

@ -1,20 +0,0 @@
{{- if and .Values.seedPeer.metrics.enable .Values.seedPeer.metrics.prometheusRule.enable }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "dragonfly.seedPeer.fullname" . }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedPeer.name }}
{{- if .Values.seedPeer.metrics.prometheusRule.additionalLabels }}
{{ toYaml .Values.seedPeer.metrics.prometheusRule.additionalLabels | indent 4 }}
{{- end }}
spec:
groups:
- name: {{ template "dragonfly.seedPeer.fullname" $ }}
rules:
{{ toYaml .Values.seedPeer.metrics.prometheusRule.rules | indent 8 }}
{{- end }}

@ -1,87 +0,0 @@
{{- if .Values.seedPeer.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.seedPeer.fullname" . }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedPeer.name }}
data:
dfget.yaml: |-
aliveTime: {{ .Values.seedPeer.config.aliveTime }}
gcInterval: {{ .Values.seedPeer.config.gcInterval }}
keepStorage: {{ .Values.seedPeer.config.keepStorage }}
workHome: {{ .Values.seedPeer.config.workHome }}
logDir: {{ .Values.seedPeer.config.logDir }}
cacheDir: {{ .Values.seedPeer.config.cacheDir }}
pluginDir: {{ .Values.seedPeer.config.pluginDir }}
dataDir: {{ .Values.seedPeer.config.dataDir }}
console: {{ .Values.seedPeer.config.console }}
health:
{{ toYaml .Values.dfdaemon.config.health | indent 6 }}
verbose: {{ .Values.seedPeer.config.verbose }}
{{- if .Values.seedPeer.config.verbose }}
pprof-port: {{ .Values.seedPeer.config.pprofPort }}
{{- end }}
{{- if .Values.seedPeer.metrics.enable }}
metrics: ":8000"
{{- end }}
{{- if .Values.seedPeer.config.jaeger }}
jaeger: {{ .Values.seedPeer.config.jaeger }}
{{- else if .Values.jaeger.enable }}
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
{{- end }}
scheduler:
manager:
enable: {{ .Values.seedPeer.config.scheduler.manager.enable }}
netAddrs:
{{- if and (.Values.seedPeer.config.scheduler.manager.enable) (.Values.seedPeer.config.scheduler.manager.netAddrs) }}
{{ toYaml .Values.seedPeer.config.scheduler.manager.netAddrs | indent 10 }}
{{- else if .Values.manager.enable }}
- type: tcp
addr: {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
{{- else }}
- type: tcp
addr: {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
{{- end }}
refreshInterval: {{ .Values.seedPeer.config.scheduler.manager.refreshInterval }}
seedPeer:
{{ toYaml .Values.seedPeer.config.scheduler.manager.seedPeer | indent 10 }}
scheduleTimeout: {{ .Values.seedPeer.config.scheduler.scheduleTimeout }}
disableAutoBackSource: {{ .Values.seedPeer.config.scheduler.disableAutoBackSource }}
host:
{{ toYaml .Values.seedPeer.config.host | indent 6 }}
download:
{{ toYaml .Values.seedPeer.config.download | indent 6 }}
upload:
{{ toYaml .Values.seedPeer.config.upload | indent 6 }}
storage:
{{ toYaml .Values.seedPeer.config.storage | indent 6 }}
proxy:
defaultFilter: {{ .Values.seedPeer.config.proxy.defaultFilter }}
defaultTag: {{ .Values.seedPeer.config.proxy.defaultTag }}
tcpListen:
{{- if not .Values.seedPeer.hostNetwork }}
namespace: {{ .Values.seedPeer.config.proxy.tcpListen.namespace }}
{{- end }}
port: {{ .Values.seedPeer.containerPort }}
security:
{{ toYaml .Values.seedPeer.config.proxy.security | indent 8 }}
registryMirror:
{{ toYaml .Values.seedPeer.config.proxy.registryMirror | indent 8 }}
proxies:
{{ toYaml .Values.seedPeer.config.proxy.proxies | indent 8 }}
objectStorage:
{{ toYaml .Values.seedPeer.config.objectStorage | indent 6 }}
security:
{{ toYaml .Values.seedPeer.config.security | indent 6 }}
network:
{{ toYaml .Values.seedPeer.config.network | indent 6 }}
announcer:
{{ toYaml .Values.seedPeer.config.announcer | indent 6 }}
networkTopology:
{{ toYaml .Values.seedPeer.config.networkTopology | indent 6 }}
{{- end }}

@ -1,155 +0,0 @@
{{- if .Values.seedPeer.enable }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedPeer.name }}
name: {{ template "dragonfly.seedPeer.fullname" . }}
annotations:
{{- if .Values.seedPeer.statefulsetAnnotations }}
{{ toYaml .Values.seedPeer.statefulsetAnnotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.seedPeer.updateStrategy }}
updateStrategy:
{{ toYaml .Values.seedPeer.updateStrategy | indent 4 }}
{{- end }}
replicas: {{ .Values.seedPeer.replicas }}
selector:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.seedPeer.name }}
release: {{ .Release.Name }}
serviceName: seed-peer
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.seedPeer.name }}
release: {{ .Release.Name }}
{{- if .Values.seedPeer.podLabels }}
{{ toYaml .Values.seedPeer.podLabels | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/seed-peer/seed-peer-configmap.yaml") . | sha256sum }}
{{- if .Values.seedPeer.podAnnotations }}
{{ toYaml .Values.seedPeer.podAnnotations | indent 8 }}
{{- end }}
spec:
hostNetwork: {{ .Values.seedPeer.hostNetwork }}
{{- if .Values.seedPeer.hostNetwork }}
dnsPolicy: "ClusterFirstWithHostNet"
{{- end }}
{{- with .Values.seedPeer.nodeSelector | default .Values.global.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.seedPeer.tolerations }}
tolerations:
{{ toYaml .Values.seedPeer.tolerations | indent 8 }}
{{- end }}
{{- if .Values.seedPeer.affinity }}
affinity:
{{ toYaml .Values.seedPeer.affinity | indent 8 }}
{{- end }}
{{- if quote .Values.seedPeer.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.seedPeer.terminationGracePeriodSeconds }}
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.seedPeer.priorityClassName) }}
priorityClassName: {{ .Values.seedPeer.priorityClassName }}
{{- end }}
{{- with .Values.seedPeer.image.pullSecrets | default .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.seedPeer.hostAliases }}
hostAliases:
{{ toYaml .Values.seedPeer.hostAliases | indent 8 }}
{{- end }}
initContainers:
- name: wait-for-manager
image: {{ template "seedPeer.initContainer.image" . }}
imagePullPolicy: {{ .Values.seedPeer.initContainer.image.pullPolicy }}
{{- if .Values.manager.enable }}
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.manager.restPort }}; do echo waiting for manager; sleep 2; done;']
{{- else }}
command: ['sh', '-c', 'until nslookup {{ .Values.externalManager.host }} && nc -vz {{ .Values.externalManager.host }} {{ .Values.externalManager.restPort }}; do echo waiting for external manager; sleep 2; done;']
{{- end }}
containers:
- name: seed-peer
image: {{ template "seedPeer.image" . }}
imagePullPolicy: {{ .Values.seedPeer.image.pullPolicy | quote }}
resources:
{{ toYaml .Values.seedPeer.resources | indent 12 }}
env:
{{- if .Values.seedPeer.maxProcs }}
- name: GOMAXPROCS
value: {{ .Values.seedPeer.maxProcs }}
{{- end }}
ports:
- containerPort: {{ .Values.seedPeer.config.download.peerGRPC.tcpListen.port }}
protocol: TCP
- containerPort: {{ .Values.seedPeer.config.upload.tcpListen.port }}
protocol: TCP
{{- if .Values.seedPeer.config.objectStorage.enable }}
- containerPort: {{ .Values.seedPeer.config.objectStorage.tcpListen.port }}
protocol: TCP
{{- end }}
{{- if .Values.seedPeer.metrics.enable }}
- containerPort: 8000
protocol: TCP
{{- end }}
volumeMounts:
- name: config
mountPath: "/etc/dragonfly"
- name: storage
mountPath: {{ .Values.seedPeer.config.dataDir }}
{{- if .Values.seedPeer.extraVolumeMounts }}
{{- toYaml .Values.seedPeer.extraVolumeMounts | nindent 8 }}
{{- end }}
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.seedPeer.config.download.peerGRPC.tcpListen.port }}"]
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.seedPeer.config.download.peerGRPC.tcpListen.port }}"]
initialDelaySeconds: 15
periodSeconds: 10
volumes:
- name: config
configMap:
name: {{ template "dragonfly.seedPeer.fullname" $ }}
items:
- key: dfget.yaml
path: dfget.yaml
{{- if not (.Values.seedPeer.persistence.enable) }}
- name: storage
emptyDir: {}
{{- end }}
{{- if .Values.seedPeer.extraVolumes }}
{{- toYaml .Values.seedPeer.extraVolumes | nindent 6 }}
{{- end }}
{{- if .Values.seedPeer.persistence.enable }}
volumeClaimTemplates:
- metadata:
name: storage
{{- range $key, $value := .Values.seedPeer.persistence.annotations }}
{{ $key }}: {{ $value }}
{{- end }}
spec:
accessModes:
{{- range .Values.seedPeer.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.seedPeer.persistence.size | quote }}
{{- include "common.storage.class" (dict "persistence" .Values.seedPeer.persistence "global" .Values.global) | nindent 8 }}
{{- end }}
{{- end }}

@ -1,32 +0,0 @@
{{- if and .Values.seedPeer.metrics.enable .Values.seedPeer.metrics.serviceMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "dragonfly.seedPeer.fullname" . }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedPeer.name }}
{{- if .Values.seedPeer.metrics.serviceMonitor.additionalLabels }}
{{ toYaml .Values.seedPeer.metrics.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
spec:
endpoints:
- port: http-metrics
{{- if .Values.seedPeer.metrics.serviceMonitor.interval }}
interval: {{ .Values.seedPeer.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.seedPeer.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.seedPeer.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.seedPeer.name }}-metrics
{{- end }}

@ -1,30 +0,0 @@
{{- if .Values.trainer.metrics.enable }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.trainer.fullname" . }}-metrics
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.trainer.name }}-metrics
{{- if .Values.trainer.metrics.service.labels }}
{{ toYaml .Values.metrics.service.labels | indent 4 }}
{{- end }}
{{- if .Values.trainer.metrics.service.annotations }}
annotations:
{{ toYaml .Values.trainer.metrics.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.trainer.metrics.service.type }}
ports:
- port: 8000
name: http-metrics
targetPort: 8000
protocol: TCP
selector:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.trainer.name }}
{{- end }}

@ -1,20 +0,0 @@
{{- if and .Values.trainer.metrics.enable .Values.trainer.metrics.prometheusRule.enable }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "dragonfly.trainer.fullname" . }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.trainer.name }}
{{- if .Values.trainer.metrics.prometheusRule.additionalLabels }}
{{ toYaml .Values.trainer.metrics.prometheusRule.additionalLabels | indent 4 }}
{{- end }}
spec:
groups:
- name: {{ template "dragonfly.trainer.fullname" $ }}
rules:
{{ toYaml .Values.trainer.metrics.prometheusRule.rules | indent 8 }}
{{- end }}

@ -1,32 +0,0 @@
{{- if and .Values.trainer.metrics.enable .Values.trainer.metrics.serviceMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "dragonfly.trainer.fullname" . }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.trainer.name }}
{{- if .Values.trainer.metrics.serviceMonitor.additionalLabels }}
{{ toYaml .Values.trainer.metrics.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
spec:
endpoints:
- port: http-metrics
{{- if .Values.trainer.metrics.serviceMonitor.interval }}
interval: {{ .Values.trainer.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.trainer.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.trainer.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
component: {{ .Values.trainer.name }}-metrics
{{- end }}

@ -1,35 +0,0 @@
{{- if .Values.trainer.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.trainer.fullname" . }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.trainer.name }}
data:
trainer.yaml: |-
server:
{{ toYaml .Values.trainer.config.server | indent 6 }}
security:
{{ toYaml .Values.trainer.config.security | indent 6 }}
network:
{{ toYaml .Values.trainer.config.network | indent 6 }}
manager:
{{ toYaml .Values.trainer.config.manager | indent 6 }}
metrics:
enable: {{ .Values.trainer.metrics.enable }}
addr: ":8000"
console: {{ .Values.trainer.config.console }}
verbose: {{ .Values.trainer.config.verbose }}
{{- if .Values.trainer.config.verbose }}
pprof-port: {{ .Values.trainer.config.pprofPort }}
{{- end }}
{{- if .Values.trainer.config.jaeger }}
jaeger: {{ .Values.trainer.config.jaeger }}
{{- else if .Values.jaeger.enable }}
jaeger: http://{{ $.Release.Name }}-jaeger-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
{{- end }}
{{- end }}

@ -1,111 +0,0 @@
{{- if .Values.trainer.enable }}
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.trainer.name }}
name: {{ template "dragonfly.trainer.fullname" . }}
annotations:
{{- if .Values.trainer.deploymentAnnotations }}
{{ toYaml .Values.trainer.deploymentAnnotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.trainer.updateStrategy }}
strategy: {{- toYaml .Values.trainer.updateStrategy | nindent 4 }}
{{- end }}
replicas: {{ .Values.trainer.replicas }}
selector:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.trainer.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.trainer.name }}
release: {{ .Release.Name }}
{{- if .Values.trainer.podLabels }}
{{ toYaml .Values.trainer.podLabels | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/trainer/trainer-configmap.yaml") . | sha256sum }}
{{- if .Values.trainer.podAnnotations }}
{{ toYaml .Values.trainer.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- with .Values.trainer.nodeSelector | default .Values.global.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.trainer.tolerations }}
tolerations:
{{ toYaml .Values.trainer.tolerations | indent 8 }}
{{- end }}
{{- if .Values.trainer.affinity }}
affinity:
{{ toYaml .Values.trainer.affinity | indent 8 }}
{{- end }}
{{- if quote .Values.trainer.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.trainer.terminationGracePeriodSeconds }}
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.scheduler.priorityClassName) }}
priorityClassName: {{ .Values.trainer.priorityClassName }}
{{- end }}
{{- with .Values.trainer.image.pullSecrets | default .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.trainer.hostAliases }}
hostAliases:
{{ toYaml .Values.trainer.hostAliases | indent 8 }}
{{- end }}
containers:
- name: trainer
image: {{ template "trainer.image" . }}
imagePullPolicy: {{ .Values.trainer.image.pullPolicy | quote }}
resources:
{{ toYaml .Values.trainer.resources | indent 12 }}
env:
{{- if .Values.trainer.maxProcs }}
- name: GOMAXPROCS
value: {{ .Values.trainer.maxProcs }}
{{- end }}
ports:
- containerPort: {{ .Values.trainer.containerPort }}
protocol: TCP
{{- if .Values.trainer.metrics.enable }}
- containerPort: 8000
protocol: TCP
{{- end }}
volumeMounts:
- name: config
mountPath: "/etc/dragonfly"
{{- if .Values.trainer.extraVolumeMounts }}
{{- toYaml .Values.trainer.extraVolumeMounts | nindent 8 }}
{{- end }}
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.trainer.containerPort }}"]
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.trainer.containerPort }}"]
initialDelaySeconds: 15
periodSeconds: 10
volumes:
- name: config
configMap:
name: {{ template "dragonfly.trainer.fullname" . }}
items:
- key: trainer.yaml
path: trainer.yaml
{{- if .Values.trainer.extraVolumes }}
{{- toYaml .Values.trainer.extraVolumes | nindent 6 }}
{{- end }}
{{- end }}

@ -1,30 +0,0 @@
{{- if .Values.trainer.enable }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.trainer.fullname" . }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.trainer.name }}
{{- if .Values.trainer.service.labels }}
{{ toYaml .Values.trainer.service.labels | indent 4 }}
{{- end }}
{{- if .Values.trainer.service.annotations }}
annotations:
{{ toYaml .Values.trainer.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.trainer.service.type }}
ports:
- port: {{ .Values.trainer.config.server.port }}
name: http-grpc
protocol: TCP
targetPort: {{ .Values.trainer.config.server.port }}
selector:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.trainer.name }}
{{- end }}

@ -1,114 +0,0 @@
{{- if .Values.triton.enable }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "dragonfly.triton.fullname" . }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.triton.name }}
annotations:
{{- if .Values.triton.deploymentAnnotations }}
{{ toYaml .Values.triton.deploymentAnnotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.triton.updateStrategy }}
updateStrategy:
{{ toYaml .Values.triton.updateStrategy | indent 4 }}
{{- end }}
replicas: {{ .Values.triton.replicas }}
selector:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.triton.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.triton.name }}
release: {{ .Release.Name }}
annotations:
checksum/serect: {{ include (print $.Template.BasePath "/triton/triton-secrets.yaml") . | sha256sum }}
{{- if .Values.triton.podAnnotations }}
{{ toYaml .Values.triton.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- with .Values.triton.nodeSelector | default .Values.global.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.triton.tolerations }}
tolerations:
{{ toYaml .Values.triton.tolerations | indent 8 }}
{{- end }}
{{- if .Values.triton.affinity }}
affinity:
{{ toYaml .Values.triton.affinity | indent 8 }}
{{- end }}
{{- if quote .Values.triton.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.triton.terminationGracePeriodSeconds }}
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.triton.priorityClassName) }}
priorityClassName: {{ .Values.triton.priorityClassName }}
{{- end }}
{{- with .Values.triton.image.pullSecrets | default .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.triton.hostAliases }}
hostAliases:
{{ toYaml .Values.triton.hostAliases | indent 8 }}
{{- end }}
containers:
- name: triton
image: {{ template "triton.image" . }}
imagePullPolicy: {{ .Values.triton.image.pullPolicy | quote}}
args: ["tritonserver", "--model-store={{ .Values.triton.modelRepositoryPath }}",
"--model-control-mode=poll",
"--repository-poll-secs=5"]
env:
{{- if .Values.triton.maxProcs }}
- name: GOMAXPROCS
value: {{ .Values.triton.maxProcs }}
{{- end }}
- name: DEFAULT_REGION
valueFrom:
secretKeyRef:
name: {{ include "dragonfly.triton.fullname" . }}-credentials
key: region
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: {{ include "dragonfly.triton.fullname" . }}-credentials
key: accessKeyID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: {{ include "dragonfly.triton.fullname" . }}-credentials
key: secretAccessKey
ports:
- containerPort: {{ .Values.triton.restPort }}
name: http-rest
protocol: TCP
- containerPort: {{ .Values.triton.grpcPort }}
name: http-grpc
protocol: TCP
- containerPort: 8002
name: http-metrics
protocol: TCP
readinessProbe:
httpGet:
path: /v2/health/ready
port: http-rest
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /v2/health/live
port: http-rest
initialDelaySeconds: 15
periodSeconds: 10
{{- end}}

@ -1,11 +0,0 @@
{{- if .Values.triton.enable }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "dragonfly.triton.fullname" . }}-credentials
type: Opaque
data:
region: {{ .Values.triton.aws.region | b64enc | quote }}
accessKeyID: {{ .Values.triton.aws.accessKeyID | b64enc | quote }}
secretAccessKey: {{ .Values.triton.aws.secretAccessKey | b64enc | quote }}
{{- end}}

@ -1,27 +0,0 @@
{{- if .Values.triton.enable }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.triton.fullname" . }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.triton.name }}
spec:
type: {{ .Values.triton.service.type }}
ports:
- port: {{ .Values.triton.restPort }}
name: http-rest
protocol: TCP
targetPort: http
- port: {{ .Values.triton.grpcPort }}
name: http-grpc
protocol: TCP
targetPort: grpc
selector:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.triton.name }}
{{- end}}

File diff suppressed because it is too large
@ -3,7 +3,7 @@ name: nydus-snapshotter
description: Nydus snapshotter is an external plugin of containerd for Nydus image service which implements a chunk-based content-addressable filesystem on top of a called RAFS.
icon: https://github.com/dragonflyoss/image-service/raw/master/misc/logo.svg
type: application
version: 0.0.8
version: 0.0.10
appVersion: 0.9.0
keywords:
- nydus

@ -31,7 +31,7 @@ sources:

annotations:
artifacthub.io/changes: |
- Change maintainers to nydus maintainers.
- Change default port of the Dragonfly.
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/dragonflyoss/helm-charts

@ -58,29 +58,37 @@ helm delete nydus-snapshotter --namespace nydus-snapshotter
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| args | list | `[]` | Args to overwrite default nydus-snapshotter startup command |
| containerRuntime | object | `{"containerd":{"configFile":"/etc/containerd/config.toml","enable":true},"initContainerImage":"ghcr.io/liubin/toml-cli:v0.0.7"}` | [Experimental] Container runtime support Choose special container runtime in Kubernetes. Support: Containerd, Docker, CRI-O |
| containerRuntime | object | `{"containerd":{"configFile":"/etc/containerd/config.toml","enable":true},"initContainer":{"image":{"pullPolicy":"Always","registry":"ghcr.io","repository":"liubin/toml-cli","tag":"v0.0.7"}}}` | [Experimental] Container runtime support Choose special container runtime in Kubernetes. Support: Containerd, Docker, CRI-O |
| containerRuntime.containerd | object | `{"configFile":"/etc/containerd/config.toml","enable":true}` | [Experimental] Containerd support |
| containerRuntime.containerd.configFile | string | `"/etc/containerd/config.toml"` | Custom config path directory, default is /etc/containerd/config.toml |
| containerRuntime.containerd.enable | bool | `true` | Enable containerd support Inject nydus-snapshotter config into ${containerRuntime.containerd.configFile}, |
| containerRuntime.initContainerImage | string | `"ghcr.io/liubin/toml-cli:v0.0.7"` | The image name of init container, just to update container runtime configuration file |
| containerRuntime.initContainer.image.pullPolicy | string | `"Always"` | Image pull policy. |
| containerRuntime.initContainer.image.registry | string | `"ghcr.io"` | Image registry. |
| containerRuntime.initContainer.image.repository | string | `"liubin/toml-cli"` | Image repository. |
| containerRuntime.initContainer.image.tag | string | `"v0.0.7"` | Image tag. |
| daemonsetAnnotations | object | `{}` | Daemonset annotations |
| dragonfly.enable | bool | `true` | Enable dragonfly |
| dragonfly.mirrorConfig[0].auth_through | bool | `false` | |
| dragonfly.mirrorConfig[0].headers.X-Dragonfly-Registry | string | `"https://index.docker.io"` | |
| dragonfly.mirrorConfig[0].host | string | `"http://127.0.0.1:65001"` | |
| dragonfly.mirrorConfig[0].ping_url | string | `"http://127.0.0.1:40901/server/ping"` | |
| dragonfly.mirrorConfig[0].host | string | `"http://127.0.0.1:4001"` | |
| dragonfly.mirrorConfig[0].ping_url | string | `"http://127.0.0.1:4003/healthy"` | |
| global.imagePullSecrets | list | `[]` | Global Docker registry secret names as an array. |
| global.imageRegistry | string | `""` | Global Docker image registry. |
| global.nodeSelector | object | `{}` | Global node labels for pod assignment. |
| hostAliases | list | `[]` | Host Aliases |
| hostNetwork | bool | `true` | Let nydus-snapshotter run in host network |
| hostPid | bool | `true` | Let nydus-snapshotter use the host's pid namespace |
| image | string | `"ghcr.io/containerd/nydus-snapshotter"` | Image repository |
| image.pullPolicy | string | `"Always"` | Image pull policy. |
| image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
| image.registry | string | `"ghcr.io"` | Image registry. |
| image.repository | string | `"containerd/nydus-snapshotter"` | Image repository. |
| image.tag | string | `"v0.9.0"` | Image tag. |
| name | string | `"nydus-snapshotter"` | nydus-snapshotter name |
| nodeSelector | object | `{}` | Node labels for pod assignment |
| podAnnotations | object | `{}` | Pod annotations |
| podLabels | object | `{}` | Pod labels |
| priorityClassName | string | `""` | Pod priorityClassName |
| pullPolicy | string | `"Always"` | Image pull policy |
| resources | object | `{"limits":{"cpu":"2","memory":"2Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits |
| tag | string | `"v0.9.0"` | Image tag |
| terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds |
| tolerations | list | `[]` | List of node taints to tolerate |

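With the flat initContainerImage string replaced by structured keys, a user values file would override the init container image like this sketch (the registry, repository and tag simply restate the chart defaults from the table above; the pullPolicy shown is an arbitrary example):

containerRuntime:
  initContainer:
    image:
      registry: ghcr.io
      repository: liubin/toml-cli
      tag: v0.0.7
      pullPolicy: IfNotPresent   # example override; chart default is Always
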
@ -18,3 +18,43 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}

{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper image name
{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
*/}}
{{- define "common.images.image" -}}
{{- $registryName := .imageRoot.registry -}}
{{- $repositoryName := .imageRoot.repository -}}
{{- $separator := ":" -}}
{{- $termination := .imageRoot.tag | toString -}}
{{- if .global }}
{{- if .global.imageRegistry }}
{{- $registryName = .global.imageRegistry -}}
{{- end -}}
{{- end -}}
{{- if .imageRoot.digest }}
{{- $separator = "@" -}}
{{- $termination = .imageRoot.digest | toString -}}
{{- end -}}
{{- if $registryName }}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- else -}}
{{- printf "%s%s%s" $repositoryName $separator $termination -}}
{{- end -}}
{{- end -}}

{{/*
Return the proper image name (for the nydus-snapshotter)
*/}}
{{- define "nydus-snapshotter.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.image "global" .Values.global ) -}}
{{- end -}}

{{/*
Return the proper image name (for the nydus-snapshotter)
*/}}
{{- define "nydus-snapshotter.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.containerRuntime.initContainer.image "global" .Values.global ) -}}
{{- end -}}

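Tracing through the helper with the new chart defaults (registry ghcr.io, repository containerd/nydus-snapshotter, tag v0.9.0, no digest), the parts are joined with ":" and nydus-snapshotter.image renders as shown below; if a digest were set, the separator would switch to "@".

# Rendered container image line with the default values (sketch)
image: ghcr.io/containerd/nydus-snapshotter:v0.9.0
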
@ -53,14 +53,18 @@ spec:
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.priorityClassName) }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.image.pullSecrets | default .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.hostAliases }}
hostAliases:
{{ toYaml .Values.hostAliases | indent 8 }}
{{- end }}
containers:
- name: nydus-snapshotter
image: "{{ .Values.image }}:{{ .Values.tag }}"
imagePullPolicy: {{ .Values.pullPolicy | quote }}
image: {{ template "nydus-snapshotter.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: ENABLE_NYDUS_OVERLAY
value: "false"

@ -99,8 +103,8 @@ spec:
initContainers:
{{- if .Values.containerRuntime.containerd.enable }}
- name: update-containerd
image: "{{ .Values.containerRuntime.initContainerImage }}"
imagePullPolicy: {{ .Values.pullPolicy | quote }}
image: {{ template "nydus-snapshotter.initContainer.image" . }}
imagePullPolicy: {{ .Values.containerRuntime.initContainer.image.pullPolicy }}
resources:
{{ toYaml .Values.resources | indent 12 }}
command:

@ -1,13 +1,27 @@
# nydus-snapshotter Helm Chart Values
#
global:
# -- Global Docker image registry.
imageRegistry: ""
# -- Global Docker registry secret names as an array.
imagePullSecrets: []
# -- Global node labels for pod assignment.
nodeSelector: {}

# -- nydus-snapshotter name
name: nydus-snapshotter
# -- Image repository
image: ghcr.io/containerd/nydus-snapshotter
# -- Image tag
tag: v0.9.0
# -- Image pull policy
pullPolicy: Always
image:
# -- Image registry.
registry: ghcr.io
# -- Image repository.
repository: containerd/nydus-snapshotter
# -- Image tag.
tag: v0.9.0
# -- Image pull policy.
pullPolicy: Always
# -- Image pull secrets.
# @default -- `[]` (defaults to global.imagePullSecrets).
pullSecrets: []
# -- Let nydus-snapshotter run in host network
hostNetwork: true
# -- Let nydus-snapshotter use the host's pid namespace

@ -43,19 +57,27 @@ dragonfly:
# -- Enable dragonfly
enable: true
mirrorConfig:
- host: "http://127.0.0.1:65001"
- host: "http://127.0.0.1:4001"
auth_through: false
headers:
"X-Dragonfly-Registry": "https://index.docker.io"
ping_url: "http://127.0.0.1:40901/server/ping"
ping_url: "http://127.0.0.1:4003/healthy"

# -- [Experimental] Container runtime support
# Choose special container runtime in Kubernetes.
# Support: Containerd, Docker, CRI-O
containerRuntime:
# -- The image name of init container, just to update container runtime configuration file
initContainerImage: ghcr.io/liubin/toml-cli:v0.0.7
initContainer:
image:
# -- Image registry.
registry: ghcr.io
# -- Image repository.
repository: liubin/toml-cli
# -- Image tag.
tag: v0.0.7
# -- Image pull policy.
pullPolicy: Always

# -- [Experimental] Containerd support
containerd:

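For reference, the updated defaults point the snapshotter at the local seed client's proxy port (4001) and health endpoint (4003/healthy) instead of the old dfdaemon ports. An equivalent override in a user values file would look like this sketch, which simply restates the new defaults shown above:

dragonfly:
  enable: true
  mirrorConfig:
    - host: "http://127.0.0.1:4001"        # seed client HTTP proxy on the node
      auth_through: false
      headers:
        "X-Dragonfly-Registry": "https://index.docker.io"
      ping_url: "http://127.0.0.1:4003/healthy"   # seed client health endpoint
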