Compare commits

..

No commits in common. "main" and "dragonfly-0.5.39" have entirely different histories.

72 changed files with 2464 additions and 6656 deletions

View File

@ -1,25 +0,0 @@
---
name: Bug Report
about: Report a bug for dragonfly charts
labels: bug
---
### Bug report:
<!-- Please describe what is actually happening -->
### Expected behavior:
<!-- Please describe what you expect to happen -->
### How to reproduce it:
<!-- How can a maintainer reproduce this issue (please be detailed) -->
### Environment:
- Dragonfly version:
- OS:
- Kernel (e.g. `uname -a`):
- Others:

View File

@ -1,5 +0,0 @@
---
name: Custom issue template
about: Custom issue template for dragonfly charts
---

View File

@ -1,18 +0,0 @@
---
name: Feature Request
about: Request a new feature for dragonfly charts
labels: enhancement
---
### Feature request:
<!-- Please describe the feature request and why you would like to have it -->
### Use case:
<!-- Please add a concrete use case to demonstrate how such a feature would add value for the user. -->
### UI Example:
<!-- If this is about a new command or command line options, please let us know how you would add it to UI (in the code block below). -->

View File

@ -1,16 +0,0 @@
<!--- Provide a general summary of your changes in the Title above -->
## Description
<!--- Describe your changes in detail -->
## Related Issue
<!--- This project only accepts pull requests related to open issues -->
<!--- If suggesting a new feature or change, please discuss it in an issue first -->
<!--- If fixing a bug, there should be an issue describing it with steps to reproduce -->
<!--- Please link to the issue here: -->
## Motivation and Context
<!--- Why is this change required? What problem does it solve? -->

View File

@ -1,17 +0,0 @@
# Set to true to add reviewers to pull requests
addReviewers: true
# Set to true to add assignees to pull requests
addAssignees: author
# A list of reviewers to be added to pull requests (GitHub user name)
reviewers:
- gaius-qi
- yxxhero
- chlins
- CormickKneey
- imeoer
- BraveY
# A number of reviewers added to the pull request
numberOfReviewers: 3

4
.github/ct.yaml vendored
View File

@ -1,4 +0,0 @@
helm-extra-args: --timeout 600s
check-version-increment: true
target-branch: main
debug: true

View File

@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"

View File

@ -1,11 +0,0 @@
name: "Auto Assign"
on:
pull_request_target:
types: [opened, reopened, ready_for_review]
jobs:
add-assignee:
runs-on: ubuntu-latest
steps:
- uses: kentaro-m/auto-assign-action@9f6dbe84a80c6e7639d1b9698048b201052a2a94

View File

@ -1,93 +0,0 @@
name: Lint and Test Charts
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
changes:
outputs:
charts: ${{ steps.filter.outputs.charts }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- id: filter
uses: dorny/paths-filter@v3.0.2
with:
filters: |
charts:
- 'charts/**/Chart.yaml'
- 'charts/**/*'
token: ${{ secrets.GITHUB_TOKEN }}
lint-test:
if: needs.changes.outputs.charts == 'true'
runs-on: ubuntu-latest
needs:
- changes
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Fetch history
run: git fetch --prune --unshallow
- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: v3.7.2
- uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.7.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config ./.github/ct.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
run: ct lint --config ./.github/ct.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.12.0
if: steps.list-changed.outputs.changed == 'true'
- name: Add bitnami chart repos
run: helm repo add bitnami https://charts.bitnami.com/bitnami
- name: Add dragonfly chart repos
run: helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
- name: Add nydus-snapshotter chart repos
run: helm repo add nydus-snapshotter https://dragonflyoss.github.io/helm-charts/
- name: Run chart-testing (install)
run: ct install --config ./.github/ct.yaml
docs-validate:
if: needs.changes.outputs.charts == 'true'
needs:
- changes
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Check for changes
run: |
docker run --rm --volume "$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:v1.10.0
if ! git diff --exit-code; then
echo "Documentation not up to date. Please run helm-docs and commit changes!" >&2
exit 1
fi

View File

@ -1,20 +0,0 @@
name: PR Label
on:
pull_request:
types: [opened, labeled, unlabeled, synchronize]
permissions:
contents: read
jobs:
classify:
name: Classify PR
runs-on: ubuntu-latest
steps:
- name: PR impact specified
uses: mheap/github-action-required-labels@8afbe8ae6ab7647d0c9f0cfa7c2f939650d22509 # v5.5
with:
mode: exactly
count: 1
labels: 'bug, enhancement, documentation, dependencies'

View File

@ -4,17 +4,13 @@ on:
push:
branches:
- main
paths:
- charts/dragonfly-stack/Chart.yaml
- charts/dragonfly/Chart.yaml
- charts/nydus-snapshotter/Chart.yaml
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
fetch-depth: 0
@ -24,6 +20,6 @@ jobs:
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.7.0
uses: helm/chart-releaser-action@v1.1.0
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

View File

@ -1,31 +0,0 @@
name: Close stale issues and PRs
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * *"
permissions:
issues: write
pull-requests: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
id: stale
with:
delete-branch: true
days-before-close: 7
days-before-stale: 90
days-before-pr-close: 7
days-before-pr-stale: 120
stale-issue-label: "stale"
exempt-issue-labels: bug,wip,on-hold
exempt-pr-labels: bug,wip,on-hold
exempt-all-milestones: true
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity.'
close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.'
stale-pr-message: 'This PR is stale because it has been open 120 days with no activity.'
close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.'

View File

@ -1,17 +1,17 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.1.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/gruntwork-io/pre-commit
rev: v0.1.15
hooks:
- id: helmlint
- repo: https://github.com/norwoodj/helm-docs
rev: v1.10.0
hooks:
- id: helm-docs
files: (README\.md\.gotmpl|(Chart|requirements|values)\.yaml)$
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.1.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/gruntwork-io/pre-commit
rev: v0.1.15
hooks:
- id: helmlint
- repo: https://github.com/norwoodj/helm-docs
rev: v1.3.0
hooks:
- id: helm-docs
files: (README\.md\.gotmpl|(Chart|requirements|values)\.yaml)$

View File

@ -1,308 +0,0 @@
# Install Dragonfly & Nydus on Kubernetes
This document will help you experience how to use [Dragonfly](https://d7y.io) & [Nydus](https://nydus.dev/).
## Prerequisites
<!-- markdownlint-disable -->
| Name | Version | Document |
| ------- | --------- | -------------------------------------------------------- |
| Kind | v0.17.0+ | [kind.sigs.k8s.io](https://kind.sigs.k8s.io/) |
| Helm | v3.11.0+ | [helm.sh](https://helm.sh/) |
| kubectl | v1.23.17+ | [kubernetes.io](https://kubernetes.io/docs/tasks/tools/) |
| docker | v23.0.3+ | [docker.com](https://docs.docker.com/engine/install/) |
<!-- markdownlint-restore -->
**Notice:** [Kind](https://kind.sigs.k8s.io/) is recommended if no kubernetes cluster is available for testing.
## Setup kubernetes cluster
Download containerd configuration for kind.
```shell
curl -fsSL -o config.toml https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/test/testdata/containerd/config.toml
```
Create kind cluster configuration file `kind-config.yaml`, configuration content is as follows:
```shell
cat <<EOF > kind-config.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
ipFamily: dual
nodes:
- role: control-plane
image: kindest/node:v1.23.17
extraPortMappings:
- containerPort: 4001
hostPort: 4001
protocol: TCP
extraMounts:
- hostPath: ./config.toml
containerPath: /etc/containerd/config.toml
- hostPath: /tmp/artifact
containerPath: /tmp/artifact
- hostPath: /dev/fuse
containerPath: /dev/fuse
EOF
```
Create a kind cluster using the configuration file:
```shell
$ kind create cluster --config kind-config.yaml
Creating cluster "kind" ...
✓ Ensuring node image (kindest/node:v1.23.17) 🖼
✓ Preparing nodes 📦
✓ Writing configuration 📜
✓ Starting control-plane 🕹️
✓ Installing CNI 🔌
✓ Installing StorageClass 💾
Set kubectl context to "kind-kind"
You can now use your cluster with:
kubectl cluster-info --context kind-kind
Thanks for using kind! 😊
```
Switch the context of kubectl to kind cluster:
```shell
kubectl config use-context kind-kind
```
## Install Dragonfly based on Helm Charts
Install Dragonfly using the configuration:
<!-- markdownlint-disable -->
```shell
$ helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set client.config.proxy.prefetch=true,seedClient.config.proxy.prefetch=true
NAME: dragonfly
LAST DEPLOYED: Fri Apr 7 10:35:12 2023
NAMESPACE: dragonfly-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
1. Get the scheduler address by running these commands:
export SCHEDULER_POD_NAME=$(kubectl get pods --namespace dragonfly-system -l "app=dragonfly,release=dragonfly,component=scheduler" -o jsonpath={.items[0].metadata.name})
export SCHEDULER_CONTAINER_PORT=$(kubectl get pod --namespace dragonfly-system $SCHEDULER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
kubectl --namespace dragonfly-system port-forward $SCHEDULER_POD_NAME 8002:$SCHEDULER_CONTAINER_PORT
echo "Visit http://127.0.0.1:8002 to use your scheduler"
2. Configure runtime to use dragonfly:
https://d7y.io/docs/getting-started/quick-start/kubernetes/
```
<!-- markdownlint-restore -->
Check that Dragonfly is deployed successfully:
```shell
$ kubectl wait po --all -n dragonfly-system --for=condition=ready --timeout=10m
pod/dragonfly-client-gs924 condition met
pod/dragonfly-manager-5d97fd88fb-txnw9 condition met
pod/dragonfly-manager-5d97fd88fb-v2nmh condition met
pod/dragonfly-manager-5d97fd88fb-xg6wr condition met
pod/dragonfly-mysql-0 condition met
pod/dragonfly-redis-master-0 condition met
pod/dragonfly-redis-replicas-0 condition met
pod/dragonfly-redis-replicas-1 condition met
pod/dragonfly-redis-replicas-2 condition met
pod/dragonfly-scheduler-0 condition met
pod/dragonfly-scheduler-1 condition met
pod/dragonfly-scheduler-2 condition met
pod/dragonfly-seed-client-0 condition met
pod/dragonfly-seed-client-1 condition met
pod/dragonfly-seed-client-2 condition met
```
## Install Nydus based on Helm Charts
Install Nydus using the default configuration, for more information about mirrors configuration, please refer to
[document](https://github.com/dragonflyoss/image-service/blob/master/docs/nydusd.md#enable-mirrors-for-storage-backend-recommend).
<!-- markdownlint-disable -->
```shell
$ curl -fsSL -o config-nydus.yaml https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/test/testdata/charts/config-nydus.yaml
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace nydus-snapshotter nydus-snapshotter dragonfly/nydus-snapshotter -f config-nydus.yaml
NAME: nydus-snapshotter
LAST DEPLOYED: Fri Apr 7 10:40:50 2023
NAMESPACE: nydus-snapshotter
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
Thank you for installing nydus-snapshotter.
Your release is named nydus-snapshotter.
To learn more about the release, try:
$ helm status nydus-snapshotter
$ helm get all nydus-snapshotter
```
Check that Nydus is deployed successfully:
```shell
$ kubectl wait po --all -n nydus-snapshotter --for=condition=ready --timeout=1m
pod/nydus-snapshotter-6mwlv condition met
```
<!-- markdownlint-restore -->
## Run Nydus Image in Kubernetes
Create Nginx pod configuration file `nginx-nydus.yaml` with Nydus image `ghcr.io/dragonflyoss/image-service/nginx:nydus-latest`.
For more details about how to build nydus image, please refer to
the [document](https://github.com/dragonflyoss/image-service/blob/master/docs/containerd-env-setup.md#convertbuild-an-image-to-nydus-format).
```shell
cat <<EOF > nginx-nydus.yaml
apiVersion: v1
kind: Pod
metadata:
name: nginx
spec:
containers:
- name: nginx
image: ghcr.io/dragonflyoss/image-service/nginx:nydus-latest
imagePullPolicy: Always
command: ["sh", "-c"]
args:
- tail -f /dev/null
EOF
```
Create a Nginx pod using the configuration file:
```shell
kubectl apply -f nginx-nydus.yaml
```
Check that Nginx is deployed successfully:
```shell
$ kubectl wait po nginx --for=condition=ready --timeout=1m
pod/nginx condition met
```
Executing the `date` command in the Nginx container.
```shell
$ kubectl exec -it nginx -- date
Mon Apr 10 07:57:38 UTC 2023
```
## Verify downloaded Nydus image via Dragonfly
Verify downloaded Nydus image via Dragonfly based on mirror mode:
<!-- markdownlint-disable -->
```shell
$ CLIENT_POD_NAME=`kubectl -n dragonfly-system get pod -l component=client --no-headers -o custom-columns=NAME:metadata.name`
$ kubectl -n dragonfly-system exec -it ${CLIENT_POD_NAME} -- sh -c 'grep "download task succeeded" /var/log/dragonfly/dfdaemon/dfdaemon.log'
2024-05-28T12:36:24.861903Z INFO download_task: dragonfly-client/src/grpc/dfdaemon_download.rs:276: download task succeeded host_id="127.0.0.1-kind-worker" task_id="4535f073321f0d1908b8c3ad63a1d59324573c0083961c5bcb7f38ac72ad598d" peer_id="127.0.0.1-kind-worker-13095fb5-786a-4908-b8c1-744be144b383"
```
<!-- markdownlint-restore -->
## Performance testing
Test the performance of single-machine image download after the integration of Dragonfly and Nydus,
please refer to [document](https://d7y.io/docs/setup/integration/nydus).
## Notes
### Install Dragonfly and Nydus in [Alibaba Cloud ACK](https://www.alibabacloud.com/product/kubernetes)
If you are using Dragonfly and Nydus in [Alibaba Cloud ACK](https://www.alibabacloud.com/product/kubernetes),
you should follow the steps when deploying Dragonfly.
Create Dragonfly configuration file `d7y-config.yaml`, configuration content is as follows:
<!-- markdownlint-disable -->
```shell
cat <<EOF > d7y-config.yaml
seedClient:
persistence:
storageClass: "alicloud-disk-essd"
size: 20Gi
redis:
master:
persistence:
storageClass: "alicloud-disk-essd"
size: 20Gi
replica:
persistence:
storageClass: "alicloud-disk-essd"
size: 20Gi
mysql:
primary:
persistence:
storageClass: "alicloud-disk-essd"
size: 20Gi
EOF
```
<!-- markdownlint-restore -->
Install Dragonfly using the params:
<!-- markdownlint-disable -->
```shell
$ helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
$ helm install --wait --timeout 10m --dependency-update --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly --set client.config.proxy.prefetch=true,seedClient.config.proxy.prefetch=true -f d7y-config.yaml
NAME: dragonfly
LAST DEPLOYED: Fri Apr 7 10:35:12 2023
NAMESPACE: dragonfly-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
1. Get the scheduler address by running these commands:
export SCHEDULER_POD_NAME=$(kubectl get pods --namespace dragonfly-system -l "app=dragonfly,release=dragonfly,component=scheduler" -o jsonpath={.items[0].metadata.name})
export SCHEDULER_CONTAINER_PORT=$(kubectl get pod --namespace dragonfly-system $SCHEDULER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
kubectl --namespace dragonfly-system port-forward $SCHEDULER_POD_NAME 8002:$SCHEDULER_CONTAINER_PORT
echo "Visit http://127.0.0.1:8002 to use your scheduler"
2. Configure runtime to use dragonfly:
https://d7y.io/docs/getting-started/quick-start/kubernetes/
```
<!-- markdownlint-restore -->
Check that Dragonfly is deployed successfully:
```shell
$ kubectl wait po --all -n dragonfly-system --for=condition=ready --timeout=10m
pod/dragonfly-client-gs924 condition met
pod/dragonfly-manager-5d97fd88fb-txnw9 condition met
pod/dragonfly-manager-5d97fd88fb-v2nmh condition met
pod/dragonfly-manager-5d97fd88fb-xg6wr condition met
pod/dragonfly-mysql-0 condition met
pod/dragonfly-redis-master-0 condition met
pod/dragonfly-redis-replicas-0 condition met
pod/dragonfly-redis-replicas-1 condition met
pod/dragonfly-redis-replicas-2 condition met
pod/dragonfly-scheduler-0 condition met
pod/dragonfly-scheduler-1 condition met
pod/dragonfly-scheduler-2 condition met
pod/dragonfly-seed-client-0 condition met
pod/dragonfly-seed-client-1 condition met
pod/dragonfly-seed-client-2 condition met
```

View File

@ -1,45 +1,34 @@
# Dragonfly Community Helm Charts
# Dragonfly Helm Charts
[![Dragonfly Stack Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/dragonfly-stack)](https://artifacthub.io/packages/helm/dragonfly/dragonfly-stack)
[![Dragonfly Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/dragonfly)](https://artifacthub.io/packages/helm/dragonfly/dragonfly)
[![Nydus Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/nydus-snapshotter)](https://artifacthub.io/packages/helm/dragonfly/nydus-snapshotter)
![Release Charts](https://github.com/dragonflyoss/helm-charts/workflows/Release%20Charts/badge.svg?branch=main)
[![Releases downloads](https://img.shields.io/github/downloads/dragonflyoss/helm-charts/total.svg)](https://github.com/dragonflyoss/helm-charts/releases)
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/dragonfly)](https://artifacthub.io/packages/search?repo=dragonfly)
Helm charts for Dragonfly Community.
Helm charts for Dragonfly.
## Introduction
Dragonfly is an open source P2P-based file distribution and
image acceleration system. It is hosted by the
Cloud Native Computing Foundation ([CNCF](https://cncf.io/)) as
an Incubating Level Project.
Dragonfly is an open source intelligent P2P based image and file distribution system. Its goal is to tackle all distribution problems in cloud native scenarios. Currently Dragonfly focuses on being:
- Simple: well-defined user-facing API (HTTP), non-invasive to all container engines;
- Efficient: CDN support, P2P based file distribution to save enterprise bandwidth;
- Intelligent: host level speed limit, intelligent flow control due to host detection;
- Secure: block transmission encryption, HTTPS connection support.
Dragonfly is now hosted by the Cloud Native Computing Foundation (CNCF) as an Incubating Level Project. Originally it was born to solve all kinds of distribution at very large scales, such as application distribution, cache distribution, log distribution, image distribution, and so on.
## Installation
Please refer to the [document][install] to install Dragonfly & Nydus on Kubernetes.
Please follow [this chart document](./charts/dragonfly/README.md)
## Documentation
## Contact
- [Install Dragonfly Stack on Kubernetes](./charts/dragonfly-stack/README.md)
- [Install Dragonfly on Kubernetes](./charts/dragonfly/README.md)
- [Install Nydus on Kubernetes](./charts/nydus-snapshotter/README.md)
- [Install Dragonfly & Nydus on Kubernetes][install]
Welcome developers to actively participate in community discussions and contribute code to Dragonfly. We will remain concerned about the issues discussed in the community and respond quickly.
## Community
Join the conversation and help the community.
- **Slack Channel**: [#dragonfly](https://cloud-native.slack.com/messages/dragonfly/) on [CNCF Slack](https://slack.cncf.io/)
- **Github Discussions**: [Dragonfly Discussion Forum](https://github.com/dragonflyoss/dragonfly/discussions)
- **Developer Group**: <dragonfly-developers@googlegroups.com>
- **Maintainer Group**: <dragonfly-maintainers@googlegroups.com>
- **Twitter**: [@dragonfly_oss](https://twitter.com/dragonfly_oss)
- **DingTalk**: [22880028764](https://qr.dingtalk.com/action/joingroup?code=v1,k1,pkV9IbsSyDusFQdByPSK3HfCG61ZCLeb8b/lpQ3uUqI=&_dt_no_comment=1&origin=11)
- Discussions: [Github Discussion Forum][discussion]
- DingTalk: 23304666
## License
Apache 2.0 License. Please see [License File][license] for more information.
[discussion]: https://github.com/dragonflyoss/Dragonfly2/discussions
[license]: LICENSE
[install]: INSTALL.md

View File

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,54 +0,0 @@
apiVersion: v2
name: dragonfly-stack
description: Collects Dragonfly component and Nydus component into a single chart to provide a complete solution for the Dragonfly stack.
icon: https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/docs/images/logo/dragonfly.svg
type: application
version: 0.1.2
appVersion: 2.1.49
keywords:
- dragonfly-stack
- dragonfly
- nydus
- d7y
- P2P
- image
maintainers:
- name: gaius-qi
email: gaius.qi@gmail.com
- name: imeoer
email: imeoer@gmail.com
- name: adamqqqplay
email: adamqqq@163.com
home: https://d7y.io/
sources:
- https://github.com/dragonflyoss/dragonfly
- https://github.com/dragonflyoss/client
- https://github.com/dragonflyoss/image-service
- https://github.com/containerd/nydus-snapshotter/
annotations:
artifacthub.io/changes: |
- Rename repo Dragonfly2 to dragonfly.
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/dragonflyoss/helm-charts
- name: Source
url: https://github.com/dragonflyoss/dragonfly
- name: Source
url: https://github.com/dragonflyoss/client
- name: Source
url: https://github.com/containerd/nydus-snapshotter
dependencies:
- name: dragonfly
version: 1.1.67
repository: https://dragonflyoss.github.io/helm-charts/
condition: dragonfly.enable
- name: nydus-snapshotter
version: 0.0.10
repository: https://dragonflyoss.github.io/helm-charts/
condition: nydus-snapshotter.enable

File diff suppressed because one or more lines are too long

View File

@ -1,133 +0,0 @@
# Dragonfly Stack Helm Chart
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/dragonfly)](https://artifacthub.io/packages/search?repo=dragonfly-stack)
Collects Dragonfly component and Nydus component into a single chart to provide a complete solution for the Dragonfly stack.
## TL;DR
```shell
helm repo add dragonfly-stack https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly-stack/dragonfly-stack
```
## Introduction
Dragonfly is an open source intelligent P2P based image and file distribution system. Its goal is to tackle all distribution problems in cloud native scenarios. Currently Dragonfly focuses on being:
- Simple: well-defined user-facing API (HTTP), non-invasive to all container engines;
- Efficient: Seed peer support, P2P based file distribution to save enterprise bandwidth;
- Intelligent: host level speed limit, intelligent flow control due to host detection;
- Secure: block transmission encryption, HTTPS connection support.
Dragonfly is now hosted by the Cloud Native Computing Foundation (CNCF) as an Incubating Level Project. Originally it was born to solve all kinds of distribution at very large scales, such as application distribution, cache distribution, log distribution, image distribution, and so on.
## Prerequisites
- Kubernetes cluster 1.20+
- Helm v3.8.0+
## Installation Guide
When using Dragonfly in Kubernetes, a container runtime must be configured. This work can be done by the init script in this chart.
For more detail about installation is available in [Kubernetes with Dragonfly](https://d7y.io/docs/getting-started/quick-start/kubernetes/)
We recommend reading the details about [Kubernetes with Dragonfly](https://d7y.io/docs/getting-started/quick-start/kubernetes/) before installing.
> **We do not recommend using Dragonfly with Docker in Kubernetes** for several reasons: 1. no fallback image pulling policy. 2. deprecated in Kubernetes.
## Installation
### Install with custom configuration
Create the `values.yaml` configuration file. It is recommended to use external redis and mysql instead of containers. This example uses external mysql and redis.
```yaml
dragonfly:
mysql:
enable: false
externalMysql:
migrate: true
host: mysql-host
username: dragonfly
password: dragonfly
database: manager
port: 3306
redis:
enable: false
externalRedis:
addrs:
- redis.example.com:6379
password: dragonfly
```
Install dragonfly-stack chart with release name `dragonfly`:
```shell
helm repo add dragonfly-stack https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly-stack -f values.yaml
```
### Install with an existing manager
Create the `values.yaml` configuration file. Need to configure the cluster id associated with scheduler and seed peer. This example is to deploy a cluster using the existing manager and redis.
```yaml
dragonfly:
scheduler:
config:
manager:
schedulerClusterID: 1
seedClient:
config:
seedPeer:
enable: true
type: super
clusterID: 1
manager:
enable: false
externalManager:
enable: true
host: "dragonfly-manager.dragonfly-system.svc.cluster.local"
restPort: 8080
grpcPort: 65003
redis:
enable: false
externalRedis:
addrs:
- redis.example.com:6379
password: dragonfly
mysql:
enable: false
```
Install dragonfly-stack chart with release name `dragonfly`:
```shell
helm repo add dragonfly-stack https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly/dragonfly-stack -f values.yaml
```
## Uninstall
Uninstall the `dragonfly` deployment:
```shell
helm delete dragonfly --namespace dragonfly-system
```
{{ template "chart.valuesSection" . }}
## Chart dependencies
{{ template "chart.requirementsTable" . }}

File diff suppressed because it is too large Load Diff

View File

@ -1,60 +1,45 @@
apiVersion: v2
name: dragonfly
description: Dragonfly is an intelligent P2P based image and file distribution system
icon: https://raw.githubusercontent.com/dragonflyoss/dragonfly/main/docs/images/logo/dragonfly.svg
icon: https://raw.githubusercontent.com/dragonflyoss/Dragonfly2/main/docs/en/images/logo/dragonfly.svg
type: application
version: 1.4.4
appVersion: 2.3.1-rc.2
version: "0.5.39"
appVersion: "0.5.39"
keywords:
- dragonfly
- d7y
- P2P
- image
maintainers:
- name: gaius-qi
email: gaius.qi@gmail.com
- name: yxxhero
email: aiopsclub@163.com
- name: jim3ma
email: majinjing3@gmail.com
- dragonfly
- d7y
- P2P
- image
home: https://d7y.io/
sources:
- https://github.com/dragonflyoss/dragonfly
- https://github.com/dragonflyoss/client
- https://github.com/dragonflyoss/Dragonfly2
annotations:
artifacthub.io/changes: |
- Bump Dragonfly to v2.3.1-rc.2.
- Bump Client to v1.0.9.
- Update dragonfly image version to v2.0.2-rc.5
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/dragonflyoss/helm-charts
- name: Source
url: https://github.com/dragonflyoss/dragonfly
- name: Source
url: https://github.com/dragonflyoss/client
url: https://github.com/dragonflyoss/Dragonfly2
artifacthub.io/images: |
- name: manager
image: dragonflyoss/manager:v2.3.1-rc.2
image: dragonflyoss/manager:v2.0.2-rc.5
- name: cdn
image: dragonflyoss/cdn:v2.0.2-rc.5
- name: dfdaemon
image: dragonflyoss/dfdaemon:v2.0.2-rc.5
- name: scheduler
image: dragonflyoss/scheduler:v2.3.1-rc.2
- name: client
image: dragonflyoss/client:v1.0.9
- name: seed-client
image: dragonflyoss/client:v1.0.9
- name: dfinit
image: dragonflyoss/dfinit:v1.0.9
image: dragonflyoss/scheduler:v2.0.2-rc.5
dependencies:
- name: mysql
version: 10.1.1
repository: https://charts.bitnami.com/bitnami
condition: mysql.enable
- name: redis
version: 19.5.5
repository: https://charts.bitnami.com/bitnami
condition: redis.enable
- name: mysql
version: "8.0.0"
repository: "https://charts.bitnami.com/bitnami"
condition: mysql.enable
- name: redis
version: "12.1.0"
repository: "https://charts.bitnami.com/bitnami"
condition: redis.enable

File diff suppressed because one or more lines are too long

View File

@ -1,6 +1,6 @@
# Dragonfly Helm Chart
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/dragonfly)](https://artifacthub.io/packages/search?repo=dragonfly)
{{ template "chart.badgesSection" . }}
Provide efficient, stable, secure, low-cost file and image distribution services to be the best practice and standard solution in the related Cloud-Native area.
@ -16,24 +16,19 @@ helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly
Dragonfly is an open source intelligent P2P based image and file distribution system. Its goal is to tackle all distribution problems in cloud native scenarios. Currently Dragonfly focuses on being:
- Simple: well-defined user-facing API (HTTP), non-invasive to all container engines;
- Efficient: Seed peer support, P2P based file distribution to save enterprise bandwidth;
- Efficient: CDN support, P2P based file distribution to save enterprise bandwidth;
- Intelligent: host level speed limit, intelligent flow control due to host detection;
- Secure: block transmission encryption, HTTPS connection support.
Dragonfly is now hosted by the Cloud Native Computing Foundation (CNCF) as an Incubating Level Project. Originally it was born to solve all kinds of distribution at very large scales, such as application distribution, cache distribution, log distribution, image distribution, and so on.
## Prerequisites
- Kubernetes cluster 1.20+
- Helm v3.8.0+
## Installation Guide
When use Dragonfly in Kubernetes, a container runtime must be configured. These work can be done by init script in this charts.
For more detail about installation is available in [Kubernetes with Dragonfly](https://d7y.io/docs/getting-started/quick-start/kubernetes/)
For more detail about installation is available in [Kubernetes with Dragonfly](https://github.com/dragonflyoss/Dragonfly2/blob/main/docs/en/ecosystem/Kubernetes-with-Dragonfly.md)
We recommend read the details about [Kubernetes with Dragonfly](https://d7y.io/docs/getting-started/quick-start/kubernetes/) before install.
We recommend read the details about [Kubernetes with Dragonfly](https://github.com/dragonflyoss/Dragonfly2/blob/main/docs/en/ecosystem/Kubernetes-with-Dragonfly.md) before install.
> **We did not recommend to using dragonfly with docker in Kubernetes** due to many reasons: 1. no fallback image pulling policy. 2. deprecated in Kubernetes.
@ -59,9 +54,9 @@ redis:
enable: false
externalRedis:
addrs:
- redis.example.com:6379
host: redis-host
password: dragonfly
port: 6379
```
Install dragonfly chart with release name `dragonfly`:
@ -73,7 +68,7 @@ helm install --create-namespace --namespace dragonfly-system dragonfly dragonfly
### Install with an existing manager
Create the `values.yaml` configuration file. Need to configure the cluster id associated with scheduler and seed peer. This example is to deploy a cluster using the existing manager and redis.
Create the `values.yaml` configuration file. Need to configure the cluster id associated with scheduler and cdn. This example is to deploy a cluster using the existing manager and redis.
```yaml
scheduler:
@ -81,12 +76,11 @@ scheduler:
manager:
schedulerClusterID: 1
seedClient:
cdn:
config:
seedPeer:
enable: true
type: super
clusterID: 1
base:
manager:
cdnClusterID: 1
manager:
enable: false
@ -101,9 +95,9 @@ redis:
enable: false
externalRedis:
addrs:
- redis.example.com:6379
host: redis-host
password: dragonfly
port: 6379
mysql:
enable: false

View File

@ -1,14 +1,21 @@
1. Get the manager address by running these commands:
export MANAGER_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "dragonfly.fullname" . }},release={{ .Release.Name }},component=manager" -o jsonpath={.items[0].metadata.name})
export MANAGER_CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $MANAGER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
kubectl --namespace {{ .Release.Namespace }} port-forward $MANAGER_POD_NAME 8080:$MANAGER_CONTAINER_PORT
echo "Visit http://127.0.0.1:8080 to use your manager"
2. Get the scheduler address by running these commands:
1. Get the scheduler address by running these commands:
export SCHEDULER_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "dragonfly.fullname" . }},release={{ .Release.Name }},component=scheduler" -o jsonpath={.items[0].metadata.name})
export SCHEDULER_CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $SCHEDULER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
kubectl --namespace {{ .Release.Namespace }} port-forward $SCHEDULER_POD_NAME 8002:$SCHEDULER_CONTAINER_PORT
echo "Visit http://127.0.0.1:8002 to use your scheduler"
2. Get the dfdaemon port by running these commands:
export DFDAEMON_POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "dragonfly.fullname" . }},release={{ .Release.Name }},component=dfdaemon" -o jsonpath={.items[0].metadata.name})
export DFDAEMON_CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $DFDAEMON_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
You can use $DFDAEMON_CONTAINER_PORT as a proxy port in Node.
3. Configure runtime to use dragonfly:
https://d7y.io/docs/getting-started/quick-start/kubernetes/
https://github.com/dragonflyoss/Dragonfly2/blob/main/docs/en/quick-start.md
{{ if .Values.jaeger.enable }}
4. Get Jaeger query URL by running these commands:
export JAEGER_QUERY_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services jaeger-all-in-one-query -o jsonpath="{.spec.ports[0].port}")
export JAEGER_QUERY_POD=$(kubectl --namespace {{ .Release.Namespace }} get pod -l app.kubernetes.io/name=jaeger-all-in-one -o jsonpath='{range .items[*]}{.metadata.name}{end}' | head -n 1)
kubectl --namespace {{ .Release.Namespace }} port-forward $JAEGER_QUERY_POD 16686:$JAEGER_QUERY_PORT
echo "Visit http://127.0.0.1:16686/search?limit=20&lookback=1h&maxDuration&minDuration&service=dragonfly to query download events"
{{- end }}

View File

@ -6,13 +6,6 @@ Expand the name of the chart.
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
*/}}
{{- define "common.names.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
@ -30,14 +23,6 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified manager name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dragonfly.manager.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.manager.name }}
{{- end -}}
{{/*
Create a default fully qualified scheduler name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
@ -47,138 +32,33 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- end -}}
{{/*
Create a default fully qualified client name.
Create a default fully qualified cdn name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dragonfly.client.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.client.name }}
{{- end -}}
{{/*
Create a default fully qualified cdn name: <release fullname>-<cdn name>.
*/}}
{{- define "dragonfly.cdn.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.cdn.name }}
{{- end -}}
{{/*
Create a default fully qualified seed client name.
Create a default fully qualified cdn name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dragonfly.seedClient.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.seedClient.name }}
{{- end -}}
{{/*
Create a default fully qualified manager name: <release fullname>-<manager name>.
*/}}
{{- define "dragonfly.manager.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.manager.name }}
{{- end -}}
{{/*
Create a default fully qualified dfinit name.
Create a default fully qualified cdn's nginx name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dragonfly.dfinit.fullname" -}}
{{ template "dragonfly.fullname" . }}-dfinit
{{- end -}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper image name
{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
*/}}
{{- define "common.images.image" -}}
{{- $registryName := .imageRoot.registry -}}
{{- $repositoryName := .imageRoot.repository -}}
{{- $separator := ":" -}}
{{- $termination := .imageRoot.tag | toString -}}
{{- if .global }}
{{- if .global.imageRegistry }}
{{- $registryName = .global.imageRegistry -}}
{{- end -}}
{{- end -}}
{{- if .imageRoot.digest }}
{{- $separator = "@" -}}
{{- $termination = .imageRoot.digest | toString -}}
{{- end -}}
{{- if $registryName }}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- else -}}
{{- printf "%s%s%s" $repositoryName $separator $termination -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified cdn nginx name: <release fullname>-<cdn nginx name>.
*/}}
{{- define "dragonfly.cdn.nginx.name" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.cdn.nginx.name }}
{{- end -}}
{{/*
Return the proper image name (for the manager image)
Create a default fully qualified dfdaemon name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "manager.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.manager.image "global" .Values.global ) -}}
{{- end -}}
{{/*
Return the proper image name (for the scheduler image)
*/}}
{{- define "scheduler.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.scheduler.image "global" .Values.global ) -}}
{{- end -}}
{{/*
Return the proper image name (for the client image)
*/}}
{{- define "client.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.client.image "global" .Values.global ) -}}
{{- end -}}
{{/*
Return the proper image name (for the seedClient image)
*/}}
{{- define "seedClient.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.seedClient.image "global" .Values.global ) -}}
{{- end -}}
{{/*
Return the proper image name (for the client dfinit image)
*/}}
{{- define "client.dfinit.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.client.dfinit.image "global" .Values.global ) -}}
{{- end -}}
{{/*
Return the proper image name (for the manager initContainer image)
*/}}
{{- define "manager.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.manager.initContainer.image "global" .Values.global ) -}}
{{- end -}}
{{/*
Return the proper image name (for the scheduler initContainer image)
*/}}
{{- define "scheduler.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.scheduler.initContainer.image "global" .Values.global ) -}}
{{- end -}}
{{/*
Return the proper image name (for the client initContainer image)
*/}}
{{- define "client.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.client.initContainer.image "global" .Values.global ) -}}
{{- end -}}
{{/*
Return the proper image name (for the seedClient initContainer image)
*/}}
{{- define "seedClient.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.seedClient.initContainer.image "global" .Values.global ) -}}
{{- end -}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper Storage Class
{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
*/}}
{{- define "common.storage.class" -}}
{{- $storageClass := .persistence.storageClass -}}
{{- if .global -}}
{{- if .global.storageClass -}}
{{- $storageClass = .global.storageClass -}}
{{- end -}}
{{- end -}}
{{- if $storageClass -}}
{{- if (eq "-" $storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" $storageClass -}}
{{- end -}}
{{- end -}}
{{- define "dragonfly.dfdaemon.fullname" -}}
{{ template "dragonfly.fullname" . }}-{{ .Values.dfdaemon.name }}
{{- end -}}

View File

@ -0,0 +1,107 @@
{{- if .Values.cdn.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.cdn.fullname" . }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.cdn.name }}
data:
cdn.yaml: |-
base:
listenPort: {{ .Values.cdn.containerPort }}
downloadPort: {{ .Values.cdn.nginxContiainerPort }}
systemReservedBandwidth: {{ .Values.cdn.config.base.systemReservedBandwidth }}
maxBandwidth: {{ .Values.cdn.config.base.maxBandwidth }}
enableProfiler: {{ .Values.cdn.config.base.enableProfiler }}
failAccessInterval: {{ .Values.cdn.config.base.failAccessInterval }}
gcInitialDelay: {{ .Values.cdn.config.base.gcInitialDelay }}
gcMetaInterval: {{ .Values.cdn.config.base.gcMetaInterval }}
gcStorageInterval: {{ .Values.cdn.config.base.gcStorageInterval }}
taskExpireTime: {{ .Values.cdn.config.base.taskExpireTime }}
storagePattern: {{ .Values.cdn.config.base.storagePattern }}
logDir: {{ .Values.cdn.config.base.logDir }}
nodeSelector:
{{ toYaml .Values.cdn.nodeSelector | indent 8 }}
manager:
{{- if .Values.manager.enable }}
addr: {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
{{- else }}
addr: {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
{{- end }}
cdnClusterID: {{ .Values.cdn.config.base.manager.cdnClusterID }}
keepAlive:
{{ toYaml .Values.cdn.config.base.manager.keepAlive | indent 10 }}
console: {{ .Values.cdn.config.base.console }}
{{- if .Values.cdn.metrics.enable }}
metrics:
addr: ":8000"
{{- end }}
console: {{ .Values.cdn.config.console }}
verbose: {{ .Values.cdn.config.verbose }}
{{- if .Values.cdn.config.verbose }}
pprof-port: {{ .Values.cdn.config.pprofPort }}
{{- end }}
{{- if .Values.cdn.config.jaeger }}
jaeger: {{ .Values.cdn.config.jaeger }}
{{- else if .Values.jaeger.enable }}
jaeger: http://jaeger-all-in-one-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
{{- end }}
plugins:
{{ toYaml .Values.cdn.config.plugins | indent 6 }}
nginx.conf: |-
worker_rlimit_nofile 100000;
events {
use epoll;
worker_connections 20480;
}
http {
include mime.types;
default_type application/octet-stream;
root /home/admin/cai/htdocs;
sendfile on;
tcp_nopush on;
server_tokens off;
keepalive_timeout 5;
client_header_timeout 1m;
send_timeout 1m;
client_max_body_size 3m;
index index.html index.htm;
access_log on;
log_not_found on;
gzip on;
gzip_http_version 1.0;
gzip_comp_level 6;
gzip_min_length 1024;
gzip_proxied any;
gzip_vary on;
gzip_disable msie6;
gzip_buffers 96 8k;
gzip_types text/xml text/plain text/css application/javascript application/x-javascript application/rss+xml application/json;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Web-Server-Type nginx;
proxy_set_header WL-Proxy-Client-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_redirect off;
proxy_buffers 128 8k;
proxy_intercept_errors on;
server {
listen {{ .Values.cdn.nginxContiainerPort }};
location / {
root {{ (index .Values.cdn.config.plugins.storageDriver 0).config.baseDir }};
}
}
}
{{- end }}

View File

@ -0,0 +1,135 @@
{{- if .Values.cdn.enable }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.cdn.name }}
name: {{ template "dragonfly.cdn.fullname" . }}
{{- if .Values.cdn.statefulsetAnnotations }}
annotations:
{{ toYaml .Values.cdn.statefulsetAnnotations | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.cdn.replicas }}
selector:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.cdn.name }}
release: {{ .Release.Name }}
serviceName: cdn
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.cdn.name }}
release: {{ .Release.Name }}
{{- if .Values.cdn.podLabels }}
{{ toYaml .Values.cdn.podLabels | indent 8 }}
{{- end }}
{{- if .Values.cdn.podAnnotations }}
annotations:
{{ toYaml .Values.cdn.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.cdn.nodeSelector }}
nodeSelector:
{{ toYaml .Values.cdn.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.cdn.tolerations }}
tolerations:
{{ toYaml .Values.cdn.tolerations | indent 8 }}
{{- end }}
{{- if .Values.cdn.affinity }}
affinity:
{{ toYaml .Values.cdn.affinity | indent 8 }}
{{- end }}
{{- if quote .Values.cdn.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.cdn.terminationGracePeriodSeconds }}
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.cdn.priorityClassName) }}
priorityClassName: {{ .Values.cdn.priorityClassName }}
{{- end }}
{{- if .Values.cdn.hostAliases }}
hostAliases:
{{ toYaml .Values.cdn.hostAliases | indent 8 }}
{{- end }}
initContainers:
- name: wait-for-manager
image: {{ .Values.cdn.initContainer.image }}:{{ .Values.cdn.initContainer.tag }}
imagePullPolicy: {{ .Values.cdn.initContainer.pullPolicy }}
{{- if .Values.manager.enable }}
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.manager.restPort }}; do echo waiting for manager; sleep 2; done;']
{{- else }}
command: ['sh', '-c', 'until nslookup {{ .Values.externalManager.host }} && nc -vz {{ .Values.externalManager.host }} {{ .Values.externalManager.restPort }}; do echo waiting for external manager; sleep 2; done;']
{{- end }}
containers:
- name: cdn
image: "{{ .Values.cdn.image }}:{{ .Values.cdn.tag }}"
imagePullPolicy: {{ .Values.cdn.pullPolicy | quote }}
resources:
{{ toYaml .Values.cdn.resources | indent 12 }}
ports:
- containerPort: {{ .Values.cdn.containerPort }}
protocol: TCP
- containerPort: {{ .Values.cdn.nginxContiainerPort }}
protocol: TCP
{{- if .Values.cdn.metrics.enable }}
- containerPort: 8000
protocol: TCP
{{- end }}
volumeMounts:
- name: config
mountPath: "/etc/dragonfly"
- name: nginx-config
mountPath: "/etc/nginx/nginx.conf"
subPath: "nginx.conf"
- name: storage
mountPath: {{ (index .Values.cdn.config.plugins.storageDriver 0).config.baseDir }}
{{- if .Values.cdn.extraVolumeMounts }}
{{- toYaml .Values.cdn.extraVolumeMounts | nindent 8 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "dragonfly.cdn.fullname" $ }}
items:
- key: cdn.yaml
path: cdn.yaml
- name: nginx-config
configMap:
name: {{ template "dragonfly.cdn.fullname" $ }}
{{- if not (.Values.cdn.persistence.enable) }}
- name: storage
emptyDir: {}
{{- end }}
{{- if .Values.cdn.extraVolumes }}
{{- toYaml .Values.cdn.extraVolumes | nindent 6 }}
{{- end }}
{{- if .Values.cdn.persistence.enable }}
volumeClaimTemplates:
- metadata:
name: storage
{{- range $key, $value := .Values.cdn.persistence.annotations }}
{{ $key }}: {{ $value }}
{{- end }}
spec:
accessModes:
{{- range .Values.cdn.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.cdn.persistence.size | quote }}
{{- if .Values.cdn.persistence.storageClass }}
{{- if (eq "-" .Values.cdn.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.cdn.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,30 @@
{{/*
Service exposing the cdn metrics endpoint (port 8000) for Prometheus scraping.
Rendered only when cdn metrics are enabled.
*/}}
{{- if .Values.cdn.metrics.enable }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "dragonfly.cdn.fullname" . }}-metrics
  labels:
    app: {{ template "dragonfly.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    component: {{ .Values.cdn.name }}-metrics
    {{- if .Values.cdn.metrics.service.labels }}
    {{/* Fix: render the same values path that the guard above checks. */}}
{{ toYaml .Values.cdn.metrics.service.labels | indent 4 }}
    {{- end }}
  {{- if .Values.cdn.metrics.service.annotations }}
  annotations:
{{ toYaml .Values.cdn.metrics.service.annotations | indent 4 }}
  {{- end }}
spec:
  type: {{ .Values.cdn.metrics.service.type }}
  ports:
  - port: 8000
    name: http-metrics
    targetPort: 8000
    protocol: TCP
  selector:
    app: {{ template "dragonfly.fullname" . }}
    component: {{ .Values.cdn.name }}
    release: {{ .Release.Name }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if and .Values.cdn.metrics.enable .Values.cdn.metrics.prometheusRule.enable }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "dragonfly.cdn.fullname" . }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.cdn.name }}
{{- if .Values.cdn.metrics.prometheusRule.additionalLabels }}
{{ toYaml .Values.cdn.metrics.prometheusRule.additionalLabels | indent 4 }}
{{- end }}
spec:
{{- with .Values.cdn.metrics.prometheusRule.rules }}
groups:
- name: {{ template "dragonfly.cdn.fullname" $ }}
rules: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,32 @@
{{- if and .Values.cdn.metrics.enable .Values.cdn.metrics.serviceMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "dragonfly.cdn.fullname" . }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.cdn.name }}
{{- if .Values.cdn.metrics.serviceMonitor.additionalLabels }}
{{ toYaml .Values.cdn.metrics.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
spec:
endpoints:
- port: http-metrics
{{- if .Values.cdn.metrics.serviceMonitor.interval }}
interval: {{ .Values.cdn.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.cdn.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.cdn.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.cdn.name }}-metrics
{{- end }}

View File

@ -1,48 +0,0 @@
{{- if .Values.client.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.client.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}
data:
dfdaemon.yaml: |-
host:
{{ toYaml .Values.client.config.host | indent 6 }}
server:
{{ toYaml .Values.client.config.server | indent 6 }}
download:
{{ toYaml .Values.client.config.download | indent 6 }}
upload:
{{ toYaml .Values.client.config.upload | indent 6 }}
manager:
{{- if .Values.client.config.manager.addrs }}
addr: {{ .Values.client.config.manager.addr }}
{{- else if .Values.manager.enable }}
addr: http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
{{- else }}
addr: http://{{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
{{- end }}
scheduler:
{{ toYaml .Values.client.config.scheduler | indent 6 }}
dynconfig:
{{ toYaml .Values.client.config.dynconfig | indent 6 }}
storage:
{{ toYaml .Values.client.config.storage | indent 6 }}
gc:
{{ toYaml .Values.client.config.gc | indent 6 }}
proxy:
{{ toYaml .Values.client.config.proxy | indent 6 }}
health:
{{ toYaml .Values.client.config.health | indent 6 }}
metrics:
{{ toYaml .Values.client.config.metrics | indent 6 }}
stats:
{{ toYaml .Values.client.config.stats | indent 6 }}
tracing:
{{ toYaml .Values.client.config.tracing | indent 6 }}
{{- end }}

View File

@ -1,226 +0,0 @@
{{- if .Values.client.enable }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ template "dragonfly.client.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}
annotations:
{{- if .Values.client.daemonsetAnnotations }}
{{ toYaml .Values.client.daemonsetAnnotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.client.updateStrategy }}
updateStrategy:
{{ toYaml .Values.client.updateStrategy | indent 4 }}
{{- end }}
selector:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
component: "{{ .Values.client.name }}"
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
component: "{{ .Values.client.name }}"
{{- if .Values.client.podLabels }}
{{ toYaml .Values.client.podLabels | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/client/client-configmap.yaml") . | sha256sum }}
{{- if .Values.client.dfinit.enable }}
checksum/dfinit-config: {{ include (print $.Template.BasePath "/client/dfinit-configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.client.podAnnotations }}
{{ toYaml .Values.client.podAnnotations | indent 8 }}
{{- end }}
spec:
hostNetwork: {{ .Values.client.hostNetwork }}
{{- if .Values.client.hostNetwork }}
dnsPolicy: "ClusterFirstWithHostNet"
{{- end }}
hostPID: {{ .Values.client.hostPID }}
hostIPC: {{ .Values.client.hostIPC }}
{{- with .Values.client.nodeSelector | default .Values.global.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.client.tolerations }}
tolerations:
{{ toYaml .Values.client.tolerations | indent 8 }}
{{- end }}
{{- if .Values.client.affinity }}
affinity:
{{ toYaml .Values.client.affinity | indent 8 }}
{{- end }}
{{- if quote .Values.client.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.client.terminationGracePeriodSeconds }}
{{- end }}
{{- if .Values.client.priorityClassName }}
priorityClassName: {{ .Values.client.priorityClassName }}
{{- end }}
{{- with .Values.client.image.pullSecrets | default .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.client.hostAliases }}
hostAliases:
{{ toYaml .Values.client.hostAliases | indent 8 }}
{{- end }}
initContainers:
{{- if .Values.scheduler.enable }}
- name: wait-for-scheduler
image: {{ template "client.initContainer.image" . }}
imagePullPolicy: {{ .Values.client.initContainer.image.pullPolicy }}
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.scheduler.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.scheduler.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.scheduler.config.server.port }}; do echo waiting for scheduler; sleep 2; done;']
{{- end }}
{{- if .Values.client.dfinit.enable }}
- name: dfinit
image: {{ template "client.dfinit.image" . }}
imagePullPolicy: {{ .Values.client.dfinit.image.pullPolicy }}
args:
- --log-level={{ .Values.client.dfinit.config.log.level }}
{{- if .Values.client.dfinit.config.console }}
- --console
{{- end }}
resources:
{{ toYaml .Values.client.initContainer.resources | indent 10 }}
volumeMounts:
- name: dfinit-config
mountPath: "/etc/dragonfly"
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.containerd) }}
- name: containerd-config-dir
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.containerd.configPath }}
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.crio) }}
- name: crio-config-dir
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.crio.configPath }}
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.podman) }}
- name: podman-config-dir
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.podman.configPath }}
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.docker) }}
- name: docker-config-dir
mountPath: {{ dir .Values.client.dfinit.config.containerRuntime.docker.configPath }}
{{- end }}
{{- end }}
{{- if and .Values.client.dfinit.enable .Values.client.dfinit.restartContainerRuntime }}
- name: restart-container-runtime
image: {{ template "client.initContainer.image" . }}
imagePullPolicy: {{ .Values.client.initContainer.image.pullPolicy }}
securityContext:
# nsenter need privilege permission.
privileged: true
command:
- /bin/sh
- -cx
- |-
{{- if .Values.client.dfinit.config.containerRuntime.containerd }}
nsenter -t 1 -m -- systemctl restart containerd.service
echo "restart container"
{{- else if .Values.client.dfinit.config.containerRuntime.crio }}
nsenter -t 1 -m -- systemctl restart crio.service
echo "restart cri-o"
{{- else if .Values.client.dfinit.config.containerRuntime.podman }}
nsenter -t 1 -m -- systemctl restart podman.service
echo "restart podman"
{{- else if .Values.client.dfinit.config.containerRuntime.docker }}
nsenter -t 1 -m -- systemctl restart docker.service
echo "restart docker"
{{- else }}
echo "no container runtime to restart"
{{- end }}
resources:
{{ toYaml .Values.client.initContainer.resources | indent 10 }}
{{- end }}
containers:
- name: client
image: {{ template "client.image" . }}
imagePullPolicy: {{ .Values.client.image.pullPolicy | quote }}
args:
- --log-level={{ .Values.client.config.log.level }}
{{- if .Values.client.config.console }}
- --console
{{- end }}
resources:
{{ toYaml .Values.client.resources | indent 10 }}
env:
{{- if .Values.client.maxProcs }}
- name: GOMAXPROCS
value: {{ .Values.client.maxProcs }}
{{- end }}
ports:
- containerPort: {{ .Values.client.config.upload.server.port }}
protocol: TCP
- containerPort: {{ .Values.client.config.health.server.port }}
protocol: TCP
- containerPort: {{ .Values.client.config.metrics.server.port }}
protocol: TCP
- containerPort: {{ .Values.client.config.stats.server.port }}
protocol: TCP
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.client.config.download.server.socketPath }}"]
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 5
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.client.config.download.server.socketPath }}"]
initialDelaySeconds: 15
periodSeconds: 30
timeoutSeconds: 5
volumeMounts:
- name: config
mountPath: "/etc/dragonfly"
- name: socket-dir
mountPath: /var/run/dragonfly
{{- if .Values.client.extraVolumeMounts }}
{{- toYaml .Values.client.extraVolumeMounts | nindent 8 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "dragonfly.client.fullname" . }}
- name: socket-dir
hostPath:
path: /var/run/dragonfly
type: DirectoryOrCreate
{{- if .Values.client.dfinit.enable }}
- name: dfinit-config
configMap:
name: {{ template "dragonfly.dfinit.fullname" . }}
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.containerd) }}
- name: containerd-config-dir
hostPath:
path: {{ dir .Values.client.dfinit.config.containerRuntime.containerd.configPath }}
type: DirectoryOrCreate
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.crio) }}
- name: crio-config-dir
hostPath:
path: {{ dir .Values.client.dfinit.config.containerRuntime.crio.configPath }}
type: DirectoryOrCreate
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.podman) }}
- name: podman-config-dir
hostPath:
path: {{ dir .Values.client.dfinit.config.containerRuntime.podman.configPath }}
type: DirectoryOrCreate
{{- end }}
{{- if and (.Values.client.dfinit.enable) (.Values.client.dfinit.config.containerRuntime.docker) }}
- name: docker-config-dir
hostPath:
path: {{ dir .Values.client.dfinit.config.containerRuntime.docker.configPath }}
type: DirectoryOrCreate
{{- end }}
{{- if .Values.client.extraVolumes }}
{{- toYaml .Values.client.extraVolumes | nindent 6 }}
{{- end }}
{{- end }}

View File

@ -1,18 +0,0 @@
{{- if .Values.client.dfinit.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.dfinit.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}
data:
dfinit.yaml: |-
proxy:
addr: {{ .Values.client.dfinit.config.proxy.addr }}
containerRuntime:
{{ toYaml .Values.client.dfinit.config.containerRuntime | indent 6 }}
{{- end }}

View File

@ -1,30 +0,0 @@
{{- if .Values.client.metrics.enable }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.client.fullname" . }}-metrics
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}-metrics
{{- if .Values.client.metrics.service.labels }}
{{ toYaml .Values.client.metrics.service.labels | indent 4 }}
{{- end }}
{{- if .Values.client.metrics.service.annotations }}
annotations:
{{ toYaml .Values.client.metrics.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.client.metrics.service.type }}
ports:
- port: {{ .Values.client.config.metrics.server.port }}
name: http-metrics
appProtocol: http
protocol: TCP
targetPort: {{ .Values.client.config.metrics.server.port }}
selector:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.client.name }}
{{- end }}

View File

@ -1,20 +0,0 @@
{{- if and .Values.client.metrics.enable .Values.client.metrics.prometheusRule.enable }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "dragonfly.client.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.client.name }}
{{- if .Values.client.metrics.prometheusRule.additionalLabels }}
{{ toYaml .Values.client.metrics.prometheusRule.additionalLabels | indent 4 }}
{{- end }}
spec:
groups:
- name: {{ template "dragonfly.client.fullname" $ }}
rules:
{{ toYaml .Values.client.metrics.prometheusRule.rules | indent 8 }}
{{- end }}

View File

@ -1,31 +0,0 @@
{{- /* ServiceMonitor (prometheus-operator CRD) that scrapes the
     "http-metrics" port of the client metrics Service in the release
     namespace. Requires both metrics and serviceMonitor to be enabled. */}}
{{- if and .Values.client.metrics.enable .Values.client.metrics.serviceMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "dragonfly.client.fullname" . }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels:
    app: {{ template "dragonfly.name" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    component: {{ .Values.client.name }}
{{- if .Values.client.metrics.serviceMonitor.additionalLabels }}
{{ toYaml .Values.client.metrics.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
spec:
  endpoints:
  - port: http-metrics
    {{- if .Values.client.metrics.serviceMonitor.interval }}
    interval: {{ .Values.client.metrics.serviceMonitor.interval }}
    {{- end }}
    {{- if .Values.client.metrics.serviceMonitor.scrapeTimeout }}
    scrapeTimeout: {{ .Values.client.metrics.serviceMonitor.scrapeTimeout }}
    {{- end }}
  namespaceSelector:
    matchNames:
    - {{ .Release.Namespace }}
  selector:
    matchLabels:
      {{- /* Must match the labels set on the client metrics Service above. */}}
      app: {{ template "dragonfly.name" . }}
      component: {{ .Values.client.name }}-metrics
{{- end }}

View File

@ -0,0 +1,90 @@
{{- /* ConfigMap holding dfget.yaml, the dfdaemon (peer daemon) configuration.
     Everything under "dfget.yaml: |-" is a literal block scalar: the rendered
     bytes ARE the config file, so only render-neutral template comments are
     used inside it. Scheduler address resolution precedence:
       explicit netAddrs > in-chart manager service > externalManager. */}}
{{- if .Values.dfdaemon.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "dragonfly.dfdaemon.fullname" . }}
  labels:
    app: {{ template "dragonfly.fullname" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    component: {{ .Values.dfdaemon.name }}
data:
  dfget.yaml: |-
    aliveTime: {{ .Values.dfdaemon.config.aliveTime }}
    gcInterval: {{ .Values.dfdaemon.config.gcInterval }}
    keepStorage: {{ .Values.dfdaemon.config.keepStorage }}
    workHome: {{ .Values.dfdaemon.config.workHome }}
    cacheDir: {{ .Values.dfdaemon.config.cacheDir }}
    dataDir: {{ .Values.dfdaemon.config.dataDir }}
    logDir: {{ .Values.dfdaemon.config.logDir }}
    console: {{ .Values.dfdaemon.config.console }}
    verbose: {{ .Values.dfdaemon.config.verbose }}
    {{- if .Values.dfdaemon.config.verbose }}
    pprof-port: {{ .Values.dfdaemon.config.pprofPort }}
    {{- end }}
    {{- if .Values.dfdaemon.config.metrics }}
    metrics: {{ .Values.dfdaemon.config.metrics }}
    {{- end }}
    {{- /* Explicit jaeger endpoint wins over the chart-deployed collector. */}}
    {{- if .Values.dfdaemon.config.jaeger }}
    jaeger: {{ .Values.dfdaemon.config.jaeger }}
    {{- else if .Values.jaeger.enable }}
    jaeger: http://jaeger-all-in-one-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
    {{- end }}
    scheduler:
      manager:
        enable: {{ .Values.dfdaemon.config.scheduler.manager.enable }}
        netAddrs:
        {{- if and (.Values.dfdaemon.config.scheduler.manager.enable) (.Values.dfdaemon.config.scheduler.manager.netAddrs) }}
{{ toYaml .Values.dfdaemon.config.scheduler.manager.netAddrs | indent 10 }}
        {{- else if .Values.manager.enable }}
        - type: tcp
          addr: {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
        {{- else }}
        - type: tcp
          addr: {{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
        {{- end }}
        refreshInterval: {{ .Values.dfdaemon.config.scheduler.manager.refreshInterval }}
      scheduleTimeout: {{ .Values.dfdaemon.config.scheduler.scheduleTimeout }}
      disableAutoBackSource: {{ .Values.dfdaemon.config.scheduler.disableAutoBackSource }}
    host:
{{ toYaml .Values.dfdaemon.config.host | indent 6 }}
    download:
{{ toYaml .Values.dfdaemon.config.download | indent 6 }}
    upload:
{{ toYaml .Values.dfdaemon.config.upload | indent 6 }}
    storage:
{{ toYaml .Values.dfdaemon.config.storage | indent 6 }}
    proxy:
      defaultFilter: {{ .Values.dfdaemon.config.proxy.defaultFilter }}
      tcpListen:
        {{- /* A netns path is only meaningful when not on host network. */}}
        {{- if not .Values.dfdaemon.hostNetwork }}
        namespace: {{ .Values.dfdaemon.config.proxy.tcpListen.namespace }}
        {{- end }}
        listen: {{ .Values.dfdaemon.config.proxy.tcpListen.listen }}
        port: {{ .Values.dfdaemon.containerPort }}
      security:
{{ toYaml .Values.dfdaemon.config.proxy.security | indent 8 }}
      registryMirror:
{{ toYaml .Values.dfdaemon.config.proxy.registryMirror | indent 8 }}
      proxies:
{{ toYaml .Values.dfdaemon.config.proxy.proxies | indent 8 }}
      {{- /* For docker: intercept HTTPS with the generated Dragonfly CA; the
           key pair is created by the update-docker-config init container. */}}
      {{- if .Values.containerRuntime.docker.enable }}
      hijackHTTPS:
        cert: /etc/dragonfly-ca/cacert.pem
        key: /etc/dragonfly-ca/cakey.pem
        hosts:
        - regx: .*
          insecure: {{ .Values.containerRuntime.docker.insecure }}
        {{- if and .Values.containerRuntime.docker.injectHosts (not .Values.containerRuntime.docker.restart) }}
        sni:
        {{- range .Values.containerRuntime.docker.registryPorts }}
        - listen: 127.0.0.1
          port: {{ . }}
          {{- if not $.Values.dfdaemon.hostNetwork }}
          namespace: {{ $.Values.dfdaemon.config.proxy.tcpListen.namespace }}
          {{- end }}
        {{- end }}
        {{- end }}
      {{- end }}
{{- end }}

View File

@ -0,0 +1,512 @@
{{- /* DaemonSet running one dfdaemon peer per node. The dfdaemon proxies
     registry traffic for the node's container runtime; init containers
     (further down in this file) rewrite runtime config on the host. */}}
{{- if .Values.dfdaemon.enable }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ template "dragonfly.dfdaemon.fullname" . }}
  labels:
    app: {{ template "dragonfly.fullname" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    component: {{ .Values.dfdaemon.name }}
  {{- if .Values.dfdaemon.daemonsetAnnotations }}
  annotations:
{{ toYaml .Values.dfdaemon.daemonsetAnnotations | indent 4 }}
  {{- end }}
spec:
  selector:
    matchLabels:
      app: {{ template "dragonfly.fullname" . }}
      component: "{{ .Values.dfdaemon.name }}"
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ template "dragonfly.fullname" . }}
        component: "{{ .Values.dfdaemon.name }}"
        release: {{ .Release.Name }}
        {{- if .Values.dfdaemon.podLabels }}
{{ toYaml .Values.dfdaemon.podLabels | indent 8 }}
        {{- end }}
      {{- if .Values.dfdaemon.podAnnotations }}
      annotations:
{{ toYaml .Values.dfdaemon.podAnnotations | indent 8 }}
      {{- end }}
    spec:
      {{- if .Values.dfdaemon.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.dfdaemon.nodeSelector | indent 8 }}
      {{- end }}
      hostNetwork: {{ .Values.dfdaemon.hostNetwork }}
      {{- if .Values.dfdaemon.hostNetwork }}
      dnsPolicy: "ClusterFirstWithHostNet"
      {{- end }}
      {{- /* Host PID is required when init containers nsenter into PID 1's
           mount namespace to run systemctl (docker restart / containerd /
           crio paths below). */}}
      hostPID: {{ or (and .Values.containerRuntime.docker.enable .Values.containerRuntime.docker.restart) .Values.containerRuntime.containerd.enable .Values.containerRuntime.crio.enable }}
      {{- if .Values.dfdaemon.tolerations }}
      tolerations:
{{ toYaml .Values.dfdaemon.tolerations | indent 8 }}
      {{- end }}
      {{- if .Values.dfdaemon.affinity }}
      affinity:
{{ toYaml .Values.dfdaemon.affinity | indent 8 }}
      {{- end }}
      {{- /* NOTE(review): "if quote X" is always truthy (an empty value
           quotes to a non-empty string), so this line always renders —
           confirm whether a plain "if X" was intended. */}}
      {{- if quote .Values.dfdaemon.terminationGracePeriodSeconds }}
      terminationGracePeriodSeconds: {{ .Values.dfdaemon.terminationGracePeriodSeconds }}
      {{- end }}
      {{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.dfdaemon.priorityClassName) }}
      priorityClassName: {{ .Values.dfdaemon.priorityClassName }}
      {{- end }}
      {{- if .Values.dfdaemon.hostAliases }}
      hostAliases:
{{ toYaml .Values.dfdaemon.hostAliases | indent 8 }}
      {{- end }}
      containers:
      - name: dfdaemon
        image: "{{ .Values.dfdaemon.image }}:{{ .Values.dfdaemon.tag }}"
        imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
        resources:
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
        ports:
        - containerPort: {{ .Values.dfdaemon.containerPort }}
          {{- /* Without a netns hand-off, expose the proxy on the node's
               loopback only. */}}
          {{- if and (not .Values.dfdaemon.hostNetwork) (empty .Values.dfdaemon.config.proxy.tcpListen.namespace) }}
          hostPort: {{ .Values.dfdaemon.hostPort }}
          hostIP: 127.0.0.1
          {{- end }}
          protocol: TCP
        {{- if and .Values.containerRuntime.docker.enable (not .Values.containerRuntime.docker.restart) }}
        {{- if .Values.containerRuntime.docker.injectHosts }}
        {{- /* postStart pins the configured registry domains to 127.0.0.1 in
             the HOST's /etc/hosts so docker resolves them to the local SNI
             proxy; preStop removes those entries again. */}}
        lifecycle:
          postStart:
            exec:
              command:
              - "/bin/sh"
              - "-c"
              - |
                # inject hosts after dfdaemon started
                domains="{{- join " " .Values.containerRuntime.docker.registryDomains }}"
                # remove static dns in pod /etc/hosts, which injected by host network
                sed -i '/# Dragonfly SNI Host/d' /etc/hosts
                if [[ -n "$domains" ]]; then
                  for domain in $domains; do
                    # inject static dns into /host/etc/hosts
                    if grep "127.0.0.1 $domain" /host/etc/hosts; then
                      echo "Dragonfly SNI Host $domain Found in /host/etc/hosts"
                      continue
                    else
                      echo "Try to add dragonfly SNI host $domain"
                      echo "127.0.0.1 $domain # Dragonfly SNI Host $domain" >> /host/etc/hosts
                      echo "Dragonfly SNI host $domain added"
                    fi
                  done
                fi
          preStop:
            exec:
              command:
              - "/bin/sh"
              - "-c"
              - |
                # when stop dfdaemon, clean up injected hosts info in /etc/hosts for current node
                echo "$(sed '/# Dragonfly SNI Host/d' /host/etc/hosts)" > /host/etc/hosts
        {{- end }}
        {{- end }}
        volumeMounts:
        - name: config
          mountPath: "/etc/dragonfly"
        {{- if and .Values.containerRuntime.docker.enable (not .Values.containerRuntime.docker.restart) }}
        {{- if and .Values.containerRuntime.docker.injectHosts }}
        - name: etc
          mountPath: /host/etc
        {{- end }}
        {{- end }}
        {{- if .Values.containerRuntime.docker.enable }}
        - name: d7y-ca
          mountPath: /etc/dragonfly-ca
        {{- end }}
        {{- if .Values.dfdaemon.extraVolumeMounts }}
        {{- toYaml .Values.dfdaemon.extraVolumeMounts | nindent 8 }}
        {{- end }}
        {{- /* Proxy listens inside a host netns bind-mounted by the
             mount-netns init container; CAP_SYS_ADMIN is needed to setns. */}}
        {{- if and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace }}
        - name: run
          mountPath: /run/dragonfly
        - name: data
          mountPath: {{ .Values.dfdaemon.config.dataDir }}
        securityContext:
          capabilities:
            add:
            - SYS_ADMIN
        {{- end }}
      {{- /* Init containers: netns hand-off and per-runtime host config
           rewriting (docker / containerd / crio branches follow). */}}
      {{- if or (and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace) .Values.containerRuntime.containerd.enable .Values.containerRuntime.docker.enable }}
      initContainers:
      {{- if and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace }}
      {{- /* Bind-mounts the host's network namespace (/proc/1/ns/net) to
           /run/dragonfly/net so the main container can enter it with only
           CAP_SYS_ADMIN instead of full privilege. */}}
      - name: mount-netns
        image: "{{ .Values.dfdaemon.image }}:{{ .Values.dfdaemon.tag }}"
        imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
        resources:
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
        # FIXME dfget daemon only need /proc/1/ns/net and CAP_SYS_ADMIN
        # but containerd resolves the symbolic of /proc/1/ns/net from v1.5.0
        # due to /proc/1/ns/net is not a regular symbolic link, it always failed.
        # for keeping only CAP_SYS_ADMIN capability, use init container to bind mount only netns to /run/dragonfly/net
        # https://github.com/containerd/containerd/blob/v1.5.0/pkg/cri/opts/spec_linux.go#L171
        command:
        - /bin/sh
        - -cx
        - |-
          if [ ! -e "/run/dragonfly/net" ]; then
            touch /run/dragonfly/net
          fi
          {{- /* Compare inode numbers: re-mount only when /run/dragonfly/net
               is not already the host netns (idempotent across restarts). */}}
          i1=$(stat -L -c %i /host/ns/net)
          i2=$(stat -L -c %i /run/dragonfly/net)
          if [ "$i1" != "$i2" ]; then
            /bin/mount -o bind /host/ns/net /run/dragonfly/net
          fi
        volumeMounts:
        - name: hostns
          mountPath: /host/ns
        - name: run
          mountPath: /run/dragonfly
          # bind mount need Bidirectional to propagate into host
          mountPropagation: Bidirectional
        securityContext:
          # open /proc/1/ns need privilege permission
          privileged: true
      {{- end }}
      {{- if .Values.containerRuntime.docker.enable }}
      {{- /* Generates a self-signed Dragonfly CA, trusts it on the host
           (appending to whichever system CA bundle exists), and either
           installs per-registry certs (no restart) or injects an HTTP(S)
           proxy into docker's systemd unit and restarts docker. */}}
      - name: update-docker-config
        image: "{{ .Values.containerRuntime.initContainerImage }}"
        imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
        resources:
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
        command:
        - /bin/sh
        - -cx
        - |-
          mkdir -p /tmp/dragonfly-ca
          cd /tmp/dragonfly-ca
          openssl genrsa -out cakey.pem 2048
          cat << EOF > root.conf
          [ req ]
          default_bits = 2048
          default_keyfile = key.pem
          default_md = sha256
          distinguished_name = req_distinguished_name
          req_extensions = req_ext
          string_mask = nombstr
          x509_extensions = x509_ext
          [ req_distinguished_name ]
          countryName = Country Name (2 letter code)
          countryName_default = {{.Values.containerRuntime.docker.caCert.countryName}}
          stateOrProvinceName = State or Province Name (full name)
          stateOrProvinceName_default = {{.Values.containerRuntime.docker.caCert.stateOrProvinceName}}
          localityName = Locality Name (eg, city)
          localityName_default = {{.Values.containerRuntime.docker.caCert.localityName}}
          organizationName = Organization Name (eg, company)
          organizationName_default = {{.Values.containerRuntime.docker.caCert.organizationName}}
          commonName = Common Name (e.g. server FQDN or YOUR name)
          commonName_max = 64
          commonName_default = {{.Values.containerRuntime.docker.caCert.commonName}}
          [ x509_ext ]
          authorityKeyIdentifier = keyid,issuer
          basicConstraints = CA:TRUE
          keyUsage = digitalSignature, keyEncipherment, keyCertSign, cRLSign
          subjectKeyIdentifier = hash
          [ req_ext ]
          basicConstraints = CA:TRUE
          keyUsage = digitalSignature, keyEncipherment, keyCertSign, cRLSign
          subjectKeyIdentifier = hash
          EOF
          openssl req -batch -new -x509 -key ./cakey.pem -out ./cacert.pem -days 65536 -config ./root.conf
          openssl x509 -inform PEM -in ./cacert.pem -outform DER -out ./CA.cer
          openssl x509 -in ./cacert.pem -noout -text
          # update ca for golang program(docker in host), refer: https://github.com/golang/go/blob/go1.17/src/crypto/x509/root_linux.go#L8
          ca_list="/etc/ssl/certs/ca-certificates.crt /etc/pki/tls/certs/ca-bundle.crt /etc/ssl/ca-bundle.pem /etc/pki/tls/cacert.pem /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /etc/ssl/cert.pem"
          for ca in $ca_list; do
            ca="/host$ca"
            if [[ -e "$ca" ]]; then
              echo "CA $ca" found
              if grep "Dragonfly Authority CA" "$ca"; then
                echo "Dragonfly Authority ca found"
                if [[ -e /host/etc/dragonfly-ca/cakey.pem && -e /host/etc/dragonfly-ca/cacert.pem ]]; then
                  echo "CA cert and key ready"
                  break
                else
                  echo "Warning: CA cert and key not ready"
                fi
              fi
              echo "Try to add Dragonfly CA"
              echo "# Dragonfly Authority CA" > cacert.toadd.pem
              cat cacert.pem >> cacert.toadd.pem
              cat cacert.toadd.pem >> "$ca"
              echo "Dragonfly CA added"
              cp -f ./cakey.pem ./cacert.pem /host/etc/dragonfly-ca/
              break
            fi
          done
          {{- if not .Values.containerRuntime.docker.restart }}
          domains="{{- join " " .Values.containerRuntime.docker.registryDomains }}"
          if [[ -n "$domains" ]]; then
            for domain in $domains; do
              # inject docker cert by registry domain
              dir=/host/etc/docker/certs.d/$domain
              mkdir -p "$dir"
              echo copy CA cert to $dir
              cp -f /host/etc/dragonfly-ca/cacert.pem "$dir/ca.crt"
            done
          fi
          {{- end }}
          {{- if .Values.containerRuntime.docker.restart }}
          # inject docker proxy setting and restart docker
          # currently, without host pid in container, we can not nsenter with pid and can not invoke systemctl correctly.
          status=$(nsenter -t 1 -m systemctl -- status docker --no-pager | grep http-proxy.conf)
          if [[ -n "$status" ]]; then
            echo Docker proxy already enabled, skip
          else
            echo Try to inject proxy and restart docker
            path=$(nsenter -t 1 -m systemctl -- show -p FragmentPath docker.service | grep -o "/.*")
            if [[ -z "$path" ]]; then
              echo docker.service not found
              exit 1
            fi
            nsenter -t 1 -m mkdir -- -p "$path".d
            nsenter -t 1 -m sh -- -c "echo '[Service]' > $path.d/http-proxy.conf"
            nsenter -t 1 -m sh -- -c "echo 'Environment=\"HTTP_PROXY=http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}\"' >> $path.d/http-proxy.conf"
            nsenter -t 1 -m sh -- -c "echo 'Environment=\"HTTPS_PROXY=http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}\"' >> $path.d/http-proxy.conf"
            nsenter -t 1 -m sh -- -c "echo 'Environment=\"NO_PROXY={{ join "," .Values.containerRuntime.docker.skipHosts }}\"' >> $path.d/http-proxy.conf"
            nsenter -t 1 -m systemctl -- daemon-reload
            nsenter -t 1 -m systemctl -- restart docker.service
          fi
          {{- end }}
        volumeMounts:
        - name: etc
          mountPath: /host/etc
        {{- if .Values.containerRuntime.docker.restart }}
        securityContext:
          # nsenter need privilege permission
          privileged: true
        {{- end }}
{{- else if .Values.containerRuntime.containerd.enable }}
- name: update-containerd
image: "{{ .Values.containerRuntime.initContainerImage }}"
imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
resources:
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
command:
- /bin/sh
- -cx
- |-
etcContainerd=/host/etc/containerd
if [[ -e $etcContainerd/config.toml ]]; then
echo containerd config found
else
echo $etcContainerd/config.toml not found
exit 1
fi
cat $etcContainerd/config.toml
registries="{{- join " " .Values.containerRuntime.containerd.registries }}"
if [[ -n "$domains" ]]; then
echo empty registry domains
exit 1
fi
# detect containerd config version
need_restart=0
if grep "version[^=]*=[^2]*2" $etcContainerd/config.toml; then
# inject v2 mirror setting
# get config_path if set
{{- if .Values.containerRuntime.containerd.injectConfigPath }}
config_path=$etcContainerd/certs.d
{{- else }}
config_path=$(cat $etcContainerd/config.toml | tr '"' ' ' | grep config_path | awk '{print $3}')
{{- end }}
if [[ -z "$config_path" ]]; then
echo config_path is not enabled, just add one mirror in config.toml
# parse registry domain
registry={{ .Values.dfdaemon.config.proxy.registryMirror.url}}
domain=$(echo $registry | sed -e "s,https://,," | sed "s,:.*,,")
# inject registry
if grep "registry.mirrors.\"$domain\"" $etcContainerd/config.toml; then
# TODO merge mirrors
echo "registry $registry found in config.toml, skip"
else
cat << EOF >> $etcContainerd/config.toml
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."$domain"]
endpoint = ["http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}","$registry"]
EOF
echo "Registry $domain added"
need_restart=1
fi
else
echo config_path is enabled, add mirror in $config_path
# TODO check whether config_path is enabled, if not, add it
tmp=$(cat $etcContainerd/config.toml | tr '"' ' ' | grep config_path | awk '{print $3}')
if [[ -z "$tmp" ]]; then
echo inject config_path into $etcContainerd/config.toml
cat << EOF >> $etcContainerd/config.toml
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF
fi
mkdir -p $etcContainerd/certs.d
for registry in $registries; do
# parse registry domain
domain=$(echo $registry | sed -e "s,http.://,," | sed "s,:.*,,")
# inject registry
mkdir -p $etcContainerd/certs.d/$domain
if [[ -e "$etcContainerd/certs.d/$domain/hosts.toml" ]]; then
echo "registry $registry found in config.toml, skip"
continue
else
cat << EOF >> $etcContainerd/certs.d/$domain/hosts.toml
server = "$registry"
[host."http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}"]
capabilities = ["pull", "resolve"]
[host."http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}".header]
X-Dragonfly-Registry = ["$registry"]
EOF
echo "Registry $domain added"
{{- if not .Values.containerRuntime.containerd.injectConfigPath }}
need_restart=1
{{- end }}
fi
done
fi
else
# inject legacy v1 mirror setting
echo containerd config is version 1, just only support one mirror in config.toml
# parse registry domain
registry={{ .Values.dfdaemon.config.proxy.registryMirror.url}}
domain=$(echo {{ .Values.dfdaemon.config.proxy.registryMirror.url}} | sed -e "s,http.://,," | sed "s,:.*,,")
# inject registry
if grep "registry.mirrors.\"$domain\"" $etcContainerd/config.toml; then
# TODO merge mirrors
echo "registry $registry found in config.toml, skip"
else
cat << EOF >> $etcContainerd/config.toml
[plugins.cri.registry.mirrors."$domain"]
endpoint = ["http://127.0.0.1:{{ .Values.dfdaemon.hostPort}}","$registry"]
EOF
echo "Registry $domain added"
need_restart=1
fi
fi
# restart containerd
# currently, without host pid in container, we can not nsenter with pid and can not invoke systemctl correctly.
if [[ "$need_restart" -gt 0 ]]; then
nsenter -t 1 -m systemctl -- restart containerd.service
fi
volumeMounts:
- name: etc
mountPath: /host/etc
securityContext:
# nsenter need privilege permission
privileged: true
{{- else if .Values.containerRuntime.crio.enable }}
- name: update-crio
image: "{{ .Values.containerRuntime.initContainerImage }}"
imagePullPolicy: {{ .Values.dfdaemon.pullPolicy | quote }}
resources:
{{ toYaml .Values.dfdaemon.resources | indent 12 }}
command:
- /bin/sh
- -cx
- |-
registries="{{- join " " .Values.containerRuntime.crio.registries }}"
if [[ -n "$domains" ]]; then
echo Empty registry domains
exit 1
fi
confd="/host/etc/containers/registries.conf.d"
if [[ ! -e "$confd" ]]; then
mkdir -p "$confd"
fi
for registry in $registries; do
# parse registry domain
domain=$(echo $registry | sed "s,http.://,," | sed "s,:.*,,")
schema=$(echo $registry | sed "s,://.*,,")
port=$(echo $registry | sed "s,http.://,," | sed "s,[^:]*,," | sed "s,:,,")
insecure=false
if [[ "$schema" == "http" ]]; then
insecure=true
fi
if [[ -z "$port" ]]; then
if [[ "$schema" == "https" ]]; then
port=443
elif [[ "$schema" == "http" ]]; then
port=80
fi
fi
echo schema: $schema, domain: $domain, port: $port
# inject registry
if [[ -e "$confd/$domain.conf" ]]; then
echo "registry $registry found in $confd, skip"
continue
else
cat << EOF > "$confd/$domain.conf"
[[registry]]
prefix = "$domain"
location = "$domain:$port"
insecure = $insecure
[[registry.mirror]]
location = "127.0.0.1:{{ .Values.dfdaemon.hostPort}}"
insecure = true
EOF
echo "Registry $domain added"
fi
done
nsenter -t 1 -m systemctl -- reload crio.service
volumeMounts:
- name: etc
mountPath: /host/etc
securityContext:
# nsenter need privilege permission
privileged: true
{{- end }}
{{- end }}
      volumes:
      {{- /* dfget.yaml from the ConfigMap rendered by this chart. */}}
      - name: config
        configMap:
          name: {{ template "dragonfly.dfdaemon.fullname" . }}
      {{- /* Host netns source and its bind-mount target, used by the
           mount-netns init container. */}}
      {{- if and (not .Values.dfdaemon.hostNetwork) .Values.dfdaemon.config.proxy.tcpListen.namespace }}
      - name: hostns
        hostPath:
          path: /proc/1/ns
      - name: run
        hostPath:
          path: /run/dragonfly
          type: DirectoryOrCreate
      {{- end }}
      {{- /* /host/etc access for the runtime-config init containers; the
           Dragonfly CA pair is persisted under /etc/dragonfly-ca. */}}
      {{- if .Values.containerRuntime.docker.enable }}
      - name: etc
        hostPath:
          path: /etc
      - name: d7y-ca
        hostPath:
          path: /etc/dragonfly-ca
          type: DirectoryOrCreate
      {{- else if or .Values.containerRuntime.containerd.enable .Values.containerRuntime.crio.enable }}
      - name: etc
        hostPath:
          path: /etc
      {{- end }}
      {{- /* Peer data: node-persistent hostPath or throwaway emptyDir. */}}
      - name: data
      {{- if .Values.dfdaemon.mountDataDirAsHostPath }}
        hostPath:
          path: {{ .Values.dfdaemon.config.dataDir }}
          type: DirectoryOrCreate
      {{- else }}
        emptyDir: {}
      {{- end }}
      {{- if .Values.dfdaemon.extraVolumes }}
      {{- toYaml .Values.dfdaemon.extraVolumes | nindent 6 }}
      {{- end }}
{{- end }}

View File

@ -0,0 +1,48 @@
{{- /* ConfigMaps for the bundled jaeger all-in-one tracer: the query UI
     config and the sampling strategy (probabilistic, param 1 = sample all).
     The JSON payloads are literal block scalars consumed by jaeger. */}}
{{- if .Values.jaeger.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: jaeger
    app.kubernetes.io/component: ui-configuration
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one-ui-configuration
    app.kubernetes.io/part-of: jaeger
  name: jaeger-all-in-one-ui-configuration
data:
  ui: |-
    {
      "menu": [
        {
          "items": [
            {
              "label": "Documentation",
              "url": "https://www.jaegertracing.io/docs/1.23"
            }
          ],
          "label": "About"
        }
      ]
    }
---
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: jaeger
    app.kubernetes.io/component: sampling-configuration
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one-sampling-configuration
    app.kubernetes.io/part-of: jaeger
  name: jaeger-all-in-one-sampling-configuration
data:
  sampling: |-
    {
      "default_strategy": {
        "param": 1,
        "type": "probabilistic"
      }
    }
{{- end }}

View File

@ -0,0 +1,99 @@
{{- /* Single-replica jaeger all-in-one deployment (in-memory span storage —
     traces are lost on restart; suitable for demo/dev only). Mounts the UI
     and sampling ConfigMaps defined elsewhere in this chart. */}}
{{- if .Values.jaeger.enable }}
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: jaeger
    app.kubernetes.io/component: all-in-one
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one
    app.kubernetes.io/part-of: jaeger
  name: jaeger-all-in-one
spec:
  selector:
    matchLabels:
      app: jaeger
      app.kubernetes.io/component: all-in-one
      app.kubernetes.io/instance: jaeger-all-in-one
      app.kubernetes.io/name: jaeger-all-in-one
      app.kubernetes.io/part-of: jaeger
  strategy: {}
  template:
    metadata:
      labels:
        app: jaeger
        app.kubernetes.io/component: all-in-one
        app.kubernetes.io/instance: jaeger-all-in-one
        app.kubernetes.io/name: jaeger-all-in-one
        app.kubernetes.io/part-of: jaeger
    spec:
      containers:
      - args:
        - --query.ui-config=/etc/config/ui.json
        - --sampling.strategies-file=/etc/jaeger/sampling/sampling.json
        env:
        # memory storage: spans kept in RAM only
        - name: SPAN_STORAGE_TYPE
          value: memory
        - name: COLLECTOR_ZIPKIN_HTTP_PORT
          value: "9411"
        image: jaegertracing/all-in-one:1.23.0
        # probes hit the admin HTTP endpoint (14269)
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /
            port: 14269
          initialDelaySeconds: 5
          periodSeconds: 15
        name: jaeger
        ports:
        - containerPort: 5775
          name: zk-compact-trft
          protocol: UDP
        - containerPort: 5778
          name: config-rest
        - containerPort: 6831
          name: jg-compact-trft
          protocol: UDP
        - containerPort: 6832
          name: jg-binary-trft
          protocol: UDP
        - containerPort: 9411
          name: zipkin
        - containerPort: 14267
          name: c-tchan-trft
        - containerPort: 14268
          name: c-binary-trft
        - containerPort: 16686
          name: query
        - containerPort: 14269
          name: admin-http
        - containerPort: 14250
          name: grpc
        readinessProbe:
          httpGet:
            path: /
            port: 14269
          initialDelaySeconds: 1
        resources: {}
        volumeMounts:
        - mountPath: /etc/config
          name: jaeger-all-in-one-ui-configuration-volume
          readOnly: true
        - mountPath: /etc/jaeger/sampling
          name: jaeger-all-in-one-sampling-configuration-volume
          readOnly: true
      serviceAccountName: jaeger-all-in-one
      volumes:
      - configMap:
          items:
          - key: ui
            path: ui.json
          name: jaeger-all-in-one-ui-configuration
        name: jaeger-all-in-one-ui-configuration-volume
      - configMap:
          items:
          - key: sampling
            path: sampling.json
          name: jaeger-all-in-one-sampling-configuration
        name: jaeger-all-in-one-sampling-configuration-volume
{{- end }}

View File

@ -0,0 +1,127 @@
{{- /* Services fronting the jaeger all-in-one pod: headless + ClusterIP
     collector, the query UI, and a headless agent Service. Layout mirrors
     what jaeger-operator generates.
     NOTE(review): "targetPort: 0" also mirrors operator output — presumably
     treated as "same as port"; confirm against the Kubernetes Service
     spec before changing. */}}
{{- if .Values.jaeger.enable }}
apiVersion: v1
kind: Service
metadata:
  labels:
    app: jaeger
    app.kubernetes.io/component: service-collector
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one-collector
    app.kubernetes.io/part-of: jaeger
  name: jaeger-all-in-one-collector-headless
spec:
  clusterIP: None
  ports:
  - name: http-zipkin
    port: 9411
    targetPort: 0
  - name: http-grpc
    port: 14250
    targetPort: 0
  - name: c-tchan-trft
    port: 14267
    targetPort: 0
  - name: http-c-binary-trft
    port: 14268
    targetPort: 0
  selector:
    app: jaeger
    app.kubernetes.io/component: all-in-one
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one
    app.kubernetes.io/part-of: jaeger
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: jaeger
    app.kubernetes.io/component: service-collector
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one-collector
    app.kubernetes.io/part-of: jaeger
  name: jaeger-all-in-one-collector
spec:
  ports:
  - name: http-zipkin
    port: 9411
    targetPort: 0
  - name: http-grpc
    port: 14250
    targetPort: 0
  - name: c-tchan-trft
    port: 14267
    targetPort: 0
  - name: http-c-binary-trft
    port: 14268
    targetPort: 0
  selector:
    app: jaeger
    app.kubernetes.io/component: all-in-one
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one
    app.kubernetes.io/part-of: jaeger
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: jaeger
    app.kubernetes.io/component: service-query
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one-query
    app.kubernetes.io/part-of: jaeger
  name: jaeger-all-in-one-query
spec:
  ports:
  - name: http-query
    port: 16686
    targetPort: 16686
  selector:
    app: jaeger
    app.kubernetes.io/component: all-in-one
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one
    app.kubernetes.io/part-of: jaeger
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: jaeger
    app.kubernetes.io/component: service-agent
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one-agent
    app.kubernetes.io/part-of: jaeger
  name: jaeger-all-in-one-agent
spec:
  clusterIP: None
  ports:
  - name: zk-compact-trft
    port: 5775
    protocol: UDP
    targetPort: 0
  - name: config-rest
    port: 5778
    targetPort: 0
  - name: jg-compact-trft
    port: 6831
    protocol: UDP
    targetPort: 0
  - name: jg-binary-trft
    port: 6832
    protocol: UDP
    targetPort: 0
  selector:
    app: jaeger
    app.kubernetes.io/component: all-in-one
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one
    app.kubernetes.io/part-of: jaeger
{{- end }}

View File

@ -0,0 +1,12 @@
{{- /* ServiceAccount referenced by the jaeger all-in-one Deployment
     (serviceAccountName: jaeger-all-in-one). */}}
{{- if .Values.jaeger.enable }}
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: jaeger
    app.kubernetes.io/component: service-account
    app.kubernetes.io/instance: jaeger-all-in-one
    app.kubernetes.io/name: jaeger-all-in-one
    app.kubernetes.io/part-of: jaeger
  name: jaeger-all-in-one
{{- end }}

View File

@ -3,9 +3,9 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.manager.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.manager.name }}
@ -14,23 +14,10 @@ data:
server:
rest:
addr: :{{ .Values.manager.restPort }}
{{- if and .Values.manager.config.server.rest.tls.cert .Values.manager.config.server.rest.tls.cert}}
tls:
cert: {{ .Values.manager.config.server.rest.tls.cert }}
key: {{ .Values.manager.config.server.rest.tls.key }}
{{- end }}
grpc:
advertiseIP: {{ .Values.manager.config.server.grpc.advertiseIP }}
port:
start: {{ .Values.manager.grpcPort }}
end: {{ .Values.manager.grpcPort }}
workHome: {{ .Values.manager.config.server.workHome }}
logLevel: {{ .Values.manager.config.server.logLevel }}
logDir: {{ .Values.manager.config.server.logDir }}
cacheDir: {{ .Values.manager.config.server.cacheDir }}
pluginDir: {{ .Values.manager.config.server.pluginDir }}
auth:
{{ toYaml .Values.manager.config.auth | indent 6 }}
database:
mysql:
{{- if and .Values.mysql.enable (empty .Values.externalMysql.host)}}
@ -49,33 +36,27 @@ data:
migrate: {{ .Values.externalMysql.migrate }}
{{- end }}
redis:
{{- if .Values.redis.enable }}
addrs:
- {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.redis.master.service.ports.redis }}
password: {{ .Values.redis.auth.password }}
{{- if and .Values.redis.enable (empty .Values.externalRedis.host) }}
password: {{ .Values.redis.password }}
host: {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
port: {{ .Values.redis.service.port }}
{{- else }}
addrs:
{{ toYaml .Values.externalRedis.addrs | indent 10 }}
masterName: {{ .Values.externalRedis.masterName }}
username: {{ .Values.externalRedis.username }}
password: {{ .Values.externalRedis.password }}
sentinelUsername: {{ .Values.externalRedis.sentinelUsername }}
sentinelPassword: {{ .Values.externalRedis.sentinelPassword }}
db: {{ .Values.externalRedis.db }}
brokerDB: {{ .Values.externalRedis.brokerDB }}
backendDB: {{ .Values.externalRedis.backendDB }}
host: {{ .Values.externalRedis.host }}
port: {{ .Values.externalRedis.port }}
{{- end }}
cache:
{{ toYaml .Values.manager.config.cache | indent 6 }}
job:
{{ toYaml .Values.manager.config.job | indent 6 }}
network:
{{ toYaml .Values.manager.config.network | indent 6 }}
{{- if .Values.manager.metrics.enable }}
metrics:
enable: {{ .Values.manager.metrics.enable }}
addr: ":8000"
{{- end }}
console: {{ .Values.manager.config.console }}
pprofPort: {{ .Values.manager.config.pprofPort }}
tracing:
{{ toYaml .Values.manager.config.tracing | indent 6 }}
verbose: {{ .Values.manager.config.verbose }}
{{- if .Values.manager.config.verbose }}
pprof-port: {{ .Values.manager.config.pprofPort }}
{{- end }}
{{- if .Values.manager.config.jaeger }}
jaeger: {{ .Values.manager.config.jaeger }}
{{- else if .Values.jaeger.enable }}
jaeger: http://jaeger-all-in-one-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
{{- end }}
{{- end }}

View File

@ -4,45 +4,39 @@ kind: Deployment
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.manager.name }}
name: {{ template "dragonfly.manager.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
annotations:
{{- if .Values.manager.deploymentAnnotations }}
annotations:
{{ toYaml .Values.manager.deploymentAnnotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.manager.updateStrategy }}
strategy: {{- toYaml .Values.manager.updateStrategy | nindent 4 }}
{{- end }}
replicas: {{ .Values.manager.replicas }}
selector:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.manager.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.manager.name }}
release: {{ .Release.Name }}
{{- if .Values.manager.podLabels }}
{{ toYaml .Values.manager.podLabels | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/manager/manager-configmap.yaml") . | sha256sum }}
{{- if .Values.manager.podAnnotations }}
annotations:
{{ toYaml .Values.manager.podAnnotations | indent 8 }}
{{- end }}
spec:
hostNetwork: {{ .Values.manager.hostNetwork }}
{{- if .Values.manager.hostNetwork }}
dnsPolicy: "ClusterFirstWithHostNet"
{{- end }}
{{- with .Values.manager.nodeSelector | default .Values.global.nodeSelector }}
{{- if .Values.manager.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{ toYaml .Values.manager.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.manager.tolerations }}
tolerations:
@ -55,47 +49,34 @@ spec:
{{- if quote .Values.manager.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.manager.terminationGracePeriodSeconds }}
{{- end }}
{{- if .Values.scheduler.priorityClassName }}
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.scheduler.priorityClassName) }}
priorityClassName: {{ .Values.manager.priorityClassName }}
{{- end }}
{{- with .Values.manager.image.pullSecrets | default .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.redis.enable .Values.mysql.enable }}
{{- if .Values.manager.hostAliases }}
hostAliases:
{{ toYaml .Values.manager.hostAliases | indent 8 }}
{{- end }}
{{- if or .Values.redis.enable .Values.mysql.enable }}
initContainers:
{{- if .Values.redis.enable }}
- name: wait-for-redis
image: {{ template "manager.initContainer.image" . }}
imagePullPolicy: {{ .Values.manager.initContainer.image.pullPolicy }}
command: ['sh', '-c', 'until nslookup {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} && nc -vz {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} {{ .Values.redis.master.service.ports.redis }}; do echo waiting for redis; sleep 2; done;']
resources:
{{ toYaml .Values.manager.initContainer.resources | indent 10 }}
image: {{ .Values.manager.initContainer.image }}:{{ .Values.manager.initContainer.tag }}
imagePullPolicy: {{ .Values.manager.initContainer.pullPolicy }}
command: ['sh', '-c', 'until nslookup {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} && nc -vz {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} {{ .Values.redis.service.port }}; do echo waiting for redis; sleep 2; done;']
{{- end }}
{{- if .Values.mysql.enable }}
- name: wait-for-mysql
image: {{ template "manager.initContainer.image" . }}
imagePullPolicy: {{ .Values.manager.initContainer.image.pullPolicy }}
image: {{ .Values.manager.initContainer.image }}:{{ .Values.manager.initContainer.tag }}
imagePullPolicy: {{ .Values.manager.initContainer.pullPolicy }}
command: ['sh', '-c', 'until nslookup {{ .Release.Name }}-{{ default "mysql" .Values.mysql.fullname }}.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} && nc -vz {{ .Release.Name }}-{{ default "mysql" .Values.mysql.fullname }}.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} {{ .Values.mysql.primary.service.port }}; do echo waiting for mysql; sleep 2; done;']
resources:
{{ toYaml .Values.manager.initContainer.resources | indent 10 }}
{{- end }}
{{- end }}
containers:
- name: manager
image: {{ template "manager.image" . }}
imagePullPolicy: {{ .Values.manager.image.pullPolicy | quote }}
image: "{{ .Values.manager.image }}:{{ .Values.manager.tag }}"
imagePullPolicy: {{ .Values.manager.pullPolicy | quote }}
resources:
{{ toYaml .Values.manager.resources | indent 10 }}
env:
{{- if .Values.manager.maxProcs }}
- name: GOMAXPROCS
value: {{ .Values.manager.maxProcs }}
{{- end }}
{{ toYaml .Values.manager.resources | indent 12 }}
ports:
- containerPort: {{ .Values.manager.restPort }}
protocol: TCP
@ -112,15 +93,32 @@ spec:
{{- toYaml .Values.manager.extraVolumeMounts | nindent 8 }}
{{- end }}
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.manager.grpcPort }}"]
httpGet:
path: /healthy
port: {{ .Values.manager.restPort }}
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.manager.grpcPort }}"]
httpGet:
path: /healthy
port: {{ .Values.manager.restPort }}
initialDelaySeconds: 15
periodSeconds: 10
periodSeconds: 20
{{- if .Values.manager.sidecar }}
- name: {{ .Values.manager.sidecar.name }}
image: {{ .Values.manager.sidecar.image }}:{{ .Values.manager.sidecar.tag }}
args:
{{- range .Values.manager.sidecar.args }}
- {{ . }}
{{- end }}
command:
{{- range .Values.manager.sidecar.command }}
- {{ . }}
{{- end }}
imagePullPolicy: {{ .Values.manager.sidecar.pullPolicy | quote }}
resources:
{{ toYaml .Values.manager.sidecar.resources | indent 12 }}
{{- end }}
volumes:
- name: config
configMap:

View File

@ -1,17 +1,11 @@
{{- if .Values.manager.ingress.enable -}}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ template "dragonfly.manager.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.manager.name }}
@ -20,9 +14,6 @@ metadata:
{{ toYaml .Values.manager.ingress.annotations | indent 4 }}
{{- end }}
spec:
{{- if and .Values.manager.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.manager.ingress.className }}
{{- end }}
{{- if .Values.manager.ingress.tls }}
tls:
{{- range .Values.manager.ingress.tls }}
@ -39,18 +30,8 @@ spec:
http:
paths:
- path: {{ $.Values.manager.ingress.path }}
{{- if and $.Values.manager.ingress.pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ default "ImplementationSpecific" $.Values.manager.ingress.pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "dragonfly.manager.fullname" $ }}
port:
number: {{ $.Values.manager.restPort }}
{{- else }}
serviceName: {{ template "dragonfly.manager.fullname" $ }}
servicePort: {{ $.Values.manager.restPort }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -3,9 +3,9 @@ apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.manager.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.manager.name }}
@ -21,18 +21,14 @@ spec:
ports:
- port: {{ .Values.manager.restPort }}
name: http-rest
appProtocol: http
protocol: TCP
targetPort: {{ .Values.manager.restPort }}
- port: {{ .Values.manager.grpcPort }}
name: grpc
appProtocol: grpc
name: http-grpc
protocol: TCP
targetPort: {{ .Values.manager.grpcPort }}
{{- if eq .Values.manager.service.type "NodePort" }}
nodePort: {{ .Values.manager.service.nodePort }}
{{- end }}
selector:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.manager.name }}
{{- end }}

View File

@ -3,14 +3,14 @@ apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.manager.fullname" . }}-metrics
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.manager.name }}-metrics
{{- if .Values.manager.metrics.service.labels }}
{{ toYaml .Values.manager.metrics.service.labels | indent 4 }}
{{ toYaml .Values.metrics.service.labels | indent 4 }}
{{- end }}
{{- if .Values.manager.metrics.service.annotations }}
annotations:
@ -21,10 +21,10 @@ spec:
ports:
- port: 8000
name: http-metrics
appProtocol: http
protocol: TCP
targetPort: 8000
protocol: TCP
selector:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.manager.name }}
{{- end }}

View File

@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "dragonfly.manager.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.manager.name }}
@ -13,8 +13,9 @@ metadata:
{{ toYaml .Values.manager.metrics.prometheusRule.additionalLabels | indent 4 }}
{{- end }}
spec:
{{- with .Values.manager.metrics.prometheusRule.rules }}
groups:
- name: {{ template "dragonfly.manager.fullname" $ }}
rules:
{{ toYaml .Values.manager.metrics.prometheusRule.rules | indent 8 }}
rules: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "dragonfly.manager.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.manager.name }}
@ -26,6 +26,7 @@ spec:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "dragonfly.name" . }}
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.manager.name }}-metrics
{{- end }}

View File

@ -3,14 +3,14 @@ apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.scheduler.fullname" . }}-metrics
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.scheduler.name }}-metrics
{{- if .Values.scheduler.metrics.service.labels }}
{{ toYaml .Values.scheduler.metrics.service.labels | indent 4 }}
{{ toYaml .Values.metrics.service.labels | indent 4 }}
{{- end }}
{{- if .Values.scheduler.metrics.service.annotations }}
annotations:
@ -21,10 +21,10 @@ spec:
ports:
- port: 8000
name: http-metrics
appProtocol: http
protocol: TCP
targetPort: 8000
protocol: TCP
selector:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.scheduler.name }}
release: {{ .Release.Name }}
{{- end }}

View File

@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "dragonfly.scheduler.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.scheduler.name }}
@ -13,8 +13,9 @@ metadata:
{{ toYaml .Values.scheduler.metrics.prometheusRule.additionalLabels | indent 4 }}
{{- end }}
spec:
{{- with .Values.scheduler.metrics.prometheusRule.rules }}
groups:
- name: {{ template "dragonfly.scheduler.fullname" $ }}
rules:
{{ toYaml .Values.scheduler.metrics.prometheusRule.rules | indent 8 }}
rules: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -3,35 +3,22 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.scheduler.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.scheduler.name }}
data:
scheduler.yaml: |-
deubug: {{ .Values.scheduler.config.debug }}
server:
{{ toYaml .Values.scheduler.config.server | indent 6 }}
port: {{ .Values.scheduler.containerPort }}
listenLimit: {{ .Values.scheduler.config.server.listenLimit }}
cacheDir: {{ .Values.scheduler.config.server.cacheDir }}
logDir: {{ .Values.scheduler.config.server.logDir }}
scheduler:
{{ toYaml .Values.scheduler.config.scheduler | indent 6 }}
database:
redis:
{{- if .Values.redis.enable }}
addrs:
- {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.redis.master.service.ports.redis }}
password: {{ .Values.redis.auth.password }}
{{- else }}
addrs:
{{ toYaml .Values.externalRedis.addrs | indent 10 }}
masterName: {{ .Values.externalRedis.masterName }}
username: {{ .Values.externalRedis.username }}
password: {{ .Values.externalRedis.password }}
sentinelUsername: {{ .Values.externalRedis.sentinelUsername }}
sentinelPassword: {{ .Values.externalRedis.sentinelPassword }}
brokerDB: {{ .Values.externalRedis.brokerDB }}
backendDB: {{ .Values.externalRedis.backendDB }}
{{- end }}
dynconfig:
{{ toYaml .Values.scheduler.config.dynconfig | indent 6 }}
host:
@ -45,35 +32,29 @@ data:
schedulerClusterID: {{ .Values.scheduler.config.manager.schedulerClusterID }}
keepAlive:
{{ toYaml .Values.scheduler.config.manager.keepAlive | indent 8 }}
seedPeer:
{{ toYaml .Values.scheduler.config.seedPeer | indent 6 }}
job:
redis:
{{- if .Values.redis.enable }}
addrs:
- {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.redis.master.service.ports.redis }}
password: {{ .Values.redis.auth.password }}
password: {{ .Values.redis.password }}
host: {{ .Release.Name }}-{{ default "redis" .Values.redis.fullname }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
port: {{ .Values.redis.service.port }}
{{- else }}
addrs:
{{ toYaml .Values.externalRedis.addrs | indent 10 }}
masterName: {{ .Values.externalRedis.masterName }}
username: {{ .Values.externalRedis.username }}
password: {{ .Values.externalRedis.password }}
sentinelUsername: {{ .Values.externalRedis.sentinelUsername }}
sentinelPassword: {{ .Values.externalRedis.sentinelPassword }}
brokerDB: {{ .Values.externalRedis.brokerDB }}
backendDB: {{ .Values.externalRedis.backendDB }}
host: {{ .Values.externalRedis.host }}
port: {{ .Values.externalRedis.port }}
{{- end }}
storage:
{{ toYaml .Values.scheduler.config.storage | indent 6 }}
network:
{{ toYaml .Values.scheduler.config.network | indent 6 }}
metrics:
enable: {{ .Values.scheduler.metrics.enable }}
addr: ":8000"
enableHost: {{ .Values.scheduler.metrics.enableHost }}
enablePeerHost: {{ .Values.scheduler.metrics.enablePeerHost }}
console: {{ .Values.scheduler.config.console }}
pprofPort: {{ .Values.scheduler.config.pprofPort }}
tracing:
{{ toYaml .Values.scheduler.config.tracing | indent 6 }}
verbose: {{ .Values.scheduler.config.verbose }}
{{- if .Values.scheduler.config.verbose }}
pprof-port: {{ .Values.scheduler.config.pprofPort }}
{{- end }}
{{- if .Values.scheduler.config.jaeger }}
jaeger: {{ .Values.scheduler.config.jaeger }}
{{- else if .Values.jaeger.enable }}
jaeger: http://jaeger-all-in-one-collector.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}:14268/api/traces
{{- end }}
{{- end }}

View File

@ -3,48 +3,41 @@ apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "dragonfly.scheduler.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.scheduler.name }}
annotations:
{{- if .Values.scheduler.statefulsetAnnotations }}
annotations:
{{ toYaml .Values.scheduler.statefulsetAnnotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.scheduler.updateStrategy }}
updateStrategy:
{{ toYaml .Values.scheduler.updateStrategy | indent 4 }}
{{- end }}
replicas: {{ .Values.scheduler.replicas }}
selector:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.scheduler.name }}
serviceName: scheduler
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.scheduler.name }}
{{- if .Values.scheduler.podLabels }}
{{ toYaml .Values.scheduler.podLabels | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/scheduler/scheduler-configmap.yaml") . | sha256sum }}
{{- if .Values.scheduler.podAnnotations }}
annotations:
{{ toYaml .Values.scheduler.podAnnotations | indent 8 }}
{{- end }}
spec:
hostNetwork: {{ .Values.scheduler.hostNetwork }}
{{- if .Values.scheduler.hostNetwork }}
dnsPolicy: "ClusterFirstWithHostNet"
{{- end }}
{{- with .Values.scheduler.nodeSelector | default .Values.global.nodeSelector }}
{{- if .Values.scheduler.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{ toYaml .Values.scheduler.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.scheduler.tolerations }}
tolerations:
@ -57,39 +50,28 @@ spec:
{{- if quote .Values.scheduler.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.scheduler.terminationGracePeriodSeconds }}
{{- end }}
{{- if .Values.scheduler.priorityClassName }}
{{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.scheduler.priorityClassName) }}
priorityClassName: {{ .Values.scheduler.priorityClassName }}
{{- end }}
{{- with .Values.scheduler.image.pullSecrets | default .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.scheduler.hostAliases }}
hostAliases:
{{ toYaml .Values.scheduler.hostAliases | indent 8 }}
{{- end }}
initContainers:
- name: wait-for-manager
image: {{ template "scheduler.initContainer.image" . }}
imagePullPolicy: {{ .Values.scheduler.initContainer.image.pullPolicy }}
image: {{ .Values.scheduler.initContainer.image }}:{{ .Values.scheduler.initContainer.tag }}
imagePullPolicy: {{ .Values.scheduler.initContainer.pullPolicy }}
{{- if .Values.manager.enable }}
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.manager.restPort }}; do echo waiting for manager; sleep 2; done;']
{{- else }}
command: ['sh', '-c', 'until nslookup {{ .Values.externalManager.host }} && nc -vz {{ .Values.externalManager.host }} {{ .Values.externalManager.restPort }}; do echo waiting for external manager; sleep 2; done;']
{{- end }}
resources:
{{ toYaml .Values.scheduler.initContainer.resources | indent 10 }}
containers:
- name: scheduler
image: {{ template "scheduler.image" . }}
imagePullPolicy: {{ .Values.scheduler.image.pullPolicy | quote }}
image: "{{ .Values.scheduler.image }}:{{ .Values.scheduler.tag }}"
imagePullPolicy: {{ .Values.scheduler.pullPolicy | quote }}
resources:
{{ toYaml .Values.scheduler.resources | indent 10 }}
env:
{{- if .Values.scheduler.maxProcs }}
- name: GOMAXPROCS
value: {{ .Values.scheduler.maxProcs }}
{{- end }}
{{ toYaml .Values.scheduler.resources | indent 12 }}
ports:
- containerPort: {{ .Values.scheduler.containerPort }}
protocol: TCP
@ -103,16 +85,6 @@ spec:
{{- if .Values.scheduler.extraVolumeMounts }}
{{- toYaml .Values.scheduler.extraVolumeMounts | nindent 8 }}
{{- end }}
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.scheduler.containerPort }}"]
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:{{ .Values.scheduler.containerPort }}"]
initialDelaySeconds: 15
periodSeconds: 10
volumes:
- name: config
configMap:

View File

@ -1,33 +0,0 @@
{{- if .Values.scheduler.enable }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.scheduler.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.scheduler.name }}
{{- if .Values.scheduler.service.labels }}
{{ toYaml .Values.scheduler.service.labels | indent 4 }}
{{- end }}
{{- if .Values.scheduler.service.annotations }}
annotations:
{{ toYaml .Values.scheduler.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.scheduler.service.type }}
ports:
- port: {{ .Values.scheduler.config.server.port }}
name: grpc
appProtocol: grpc
protocol: TCP
targetPort: {{ .Values.scheduler.config.server.port }}
{{- if eq .Values.scheduler.service.type "NodePort" }}
nodePort: {{ .Values.scheduler.service.nodePort }}
{{- end }}
selector:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.scheduler.name }}
{{- end }}

View File

@ -3,9 +3,9 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "dragonfly.scheduler.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.scheduler.name }}
@ -26,6 +26,7 @@ spec:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "dragonfly.name" . }}
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
component: {{ .Values.scheduler.name }}-metrics
{{- end }}

View File

@ -1,30 +0,0 @@
{{- if .Values.seedClient.metrics.enable }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.seedClient.fullname" . }}-metrics
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedClient.name }}-metrics
{{- if .Values.seedClient.metrics.service.labels }}
{{ toYaml .Values.seedClient.metrics.service.labels | indent 4 }}
{{- end }}
{{- if .Values.seedClient.metrics.service.annotations }}
annotations:
{{ toYaml .Values.seedClient.metrics.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.seedClient.metrics.service.type }}
ports:
- port: {{ .Values.seedClient.config.metrics.server.port }}
name: http-metrics
appProtocol: http
protocol: TCP
targetPort: {{ .Values.seedClient.config.metrics.server.port }}
selector:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.seedClient.name }}
{{- end }}

View File

@ -1,20 +0,0 @@
{{- if and .Values.seedClient.metrics.enable .Values.seedClient.metrics.prometheusRule.enable }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "dragonfly.seedClient.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedClient.name }}
{{- if .Values.seedClient.metrics.prometheusRule.additionalLabels }}
{{ toYaml .Values.seedClient.metrics.prometheusRule.additionalLabels | indent 4 }}
{{- end }}
spec:
groups:
- name: {{ template "dragonfly.seedClient.fullname" $ }}
rules:
{{ toYaml .Values.seedClient.metrics.prometheusRule.rules | indent 8 }}
{{- end }}

View File

@ -1,50 +0,0 @@
{{- if .Values.seedClient.enable }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "dragonfly.seedClient.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedClient.name }}
data:
dfdaemon.yaml: |-
host:
{{ toYaml .Values.seedClient.config.host | indent 6 }}
server:
{{ toYaml .Values.seedClient.config.server | indent 6 }}
download:
{{ toYaml .Values.seedClient.config.download | indent 6 }}
upload:
{{ toYaml .Values.seedClient.config.upload | indent 6 }}
manager:
{{- if .Values.seedClient.config.manager.addrs }}
addr: {{ .Values.seedClient.config.manager.addr }}
{{- else if .Values.manager.enable }}
addr: http://{{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.manager.grpcPort }}
{{- else }}
addr: http://{{ .Values.externalManager.host }}:{{ .Values.externalManager.grpcPort }}
{{- end }}
scheduler:
{{ toYaml .Values.seedClient.config.scheduler | indent 6 }}
seedPeer:
{{ toYaml .Values.seedClient.config.seedPeer | indent 6 }}
dynconfig:
{{ toYaml .Values.seedClient.config.dynconfig | indent 6 }}
storage:
{{ toYaml .Values.seedClient.config.storage | indent 6 }}
gc:
{{ toYaml .Values.seedClient.config.gc | indent 6 }}
proxy:
{{ toYaml .Values.seedClient.config.proxy | indent 6 }}
health:
{{ toYaml .Values.seedClient.config.health | indent 6 }}
metrics:
{{ toYaml .Values.seedClient.config.metrics | indent 6 }}
stats:
{{ toYaml .Values.seedClient.config.stats | indent 6 }}
tracing:
{{ toYaml .Values.seedClient.config.tracing | indent 6 }}
{{- end }}

View File

@ -1,157 +0,0 @@
{{- if .Values.seedClient.enable }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedClient.name }}
name: {{ template "dragonfly.seedClient.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
annotations:
{{- if .Values.seedClient.statefulsetAnnotations }}
{{ toYaml .Values.seedClient.statefulsetAnnotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.seedClient.updateStrategy }}
updateStrategy:
{{ toYaml .Values.seedClient.updateStrategy | indent 4 }}
{{- end }}
replicas: {{ .Values.seedClient.replicas }}
selector:
matchLabels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.seedClient.name }}
serviceName: seed-client
template:
metadata:
labels:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.seedClient.name }}
{{- if .Values.seedClient.podLabels }}
{{ toYaml .Values.seedClient.podLabels | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/seed-client/seed-client-configmap.yaml") . | sha256sum }}
{{- if .Values.seedClient.podAnnotations }}
{{ toYaml .Values.seedClient.podAnnotations | indent 8 }}
{{- end }}
spec:
hostNetwork: {{ .Values.seedClient.hostNetwork }}
{{- if .Values.seedClient.hostNetwork }}
dnsPolicy: "ClusterFirstWithHostNet"
{{- end }}
{{- with .Values.seedClient.nodeSelector | default .Values.global.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.seedClient.tolerations }}
tolerations:
{{ toYaml .Values.seedClient.tolerations | indent 8 }}
{{- end }}
{{- if .Values.seedClient.affinity }}
affinity:
{{ toYaml .Values.seedClient.affinity | indent 8 }}
{{- end }}
{{- if quote .Values.seedClient.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.seedClient.terminationGracePeriodSeconds }}
{{- end }}
{{- if .Values.seedClient.priorityClassName }}
priorityClassName: {{ .Values.seedClient.priorityClassName }}
{{- end }}
{{- with .Values.seedClient.image.pullSecrets | default .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.seedClient.hostAliases }}
hostAliases:
{{ toYaml .Values.seedClient.hostAliases | indent 8 }}
{{- end }}
initContainers:
- name: wait-for-manager
image: {{ template "seedClient.initContainer.image" . }}
imagePullPolicy: {{ .Values.seedClient.initContainer.image.pullPolicy }}
{{- if .Values.manager.enable }}
command: ['sh', '-c', 'until nslookup {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} && nc -vz {{ template "dragonfly.manager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.manager.restPort }}; do echo waiting for manager; sleep 2; done;']
{{- else }}
command: ['sh', '-c', 'until nslookup {{ .Values.externalManager.host }} && nc -vz {{ .Values.externalManager.host }} {{ .Values.externalManager.restPort }}; do echo waiting for external manager; sleep 2; done;']
{{- end }}
resources:
{{ toYaml .Values.seedClient.initContainer.resources | indent 10 }}
containers:
- name: seed-client
image: {{ template "seedClient.image" . }}
imagePullPolicy: {{ .Values.seedClient.image.pullPolicy | quote }}
args:
- --log-level={{ .Values.client.config.log.level }}
{{- if .Values.seedClient.config.console }}
- --console
{{- end }}
resources:
{{ toYaml .Values.seedClient.resources | indent 10 }}
env:
{{- if .Values.seedClient.maxProcs }}
- name: GOMAXPROCS
value: {{ .Values.seedClient.maxProcs }}
{{- end }}
ports:
- containerPort: {{ .Values.seedClient.config.upload.server.port }}
protocol: TCP
- containerPort: {{ .Values.seedClient.config.proxy.server.port }}
protocol: TCP
- containerPort: {{ .Values.seedClient.config.health.server.port }}
protocol: TCP
- containerPort: {{ .Values.seedClient.config.metrics.server.port }}
protocol: TCP
- containerPort: {{ .Values.seedClient.config.stats.server.port }}
protocol: TCP
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.seedClient.config.download.server.socketPath }}"]
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 5
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=unix://{{ .Values.seedClient.config.download.server.socketPath }}"]
initialDelaySeconds: 15
periodSeconds: 30
timeoutSeconds: 5
volumeMounts:
- name: config
mountPath: "/etc/dragonfly"
- name: storage
mountPath: {{ .Values.seedClient.config.storage.dir }}
{{- if .Values.seedClient.extraVolumeMounts }}
{{- toYaml .Values.seedClient.extraVolumeMounts | nindent 8 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "dragonfly.seedClient.fullname" . }}
{{- if not (.Values.seedClient.persistence.enable) }}
- name: storage
emptyDir: {}
{{- end }}
{{- if .Values.seedClient.extraVolumes }}
{{- toYaml .Values.seedClient.extraVolumes | nindent 6 }}
{{- end }}
{{- if .Values.seedClient.persistence.enable }}
volumeClaimTemplates:
- metadata:
name: storage
{{- range $key, $value := .Values.seedClient.persistence.annotations }}
{{ $key }}: {{ $value }}
{{- end }}
spec:
accessModes:
{{- range .Values.seedClient.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.seedClient.persistence.size | quote }}
{{- include "common.storage.class" (dict "persistence" .Values.seedClient.persistence "global" .Values.global) | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -1,43 +0,0 @@
{{- if .Values.seedClient.enable }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "dragonfly.seedClient.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
app: {{ template "dragonfly.name" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: {{ .Values.seedClient.name }}
{{- if .Values.seedClient.service.labels }}
{{ toYaml .Values.seedClient.service.labels | indent 4 }}
{{- end }}
{{- if .Values.seedClient.service.annotations }}
annotations:
{{ toYaml .Values.seedClient.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.seedClient.service.type }}
ports:
- port: {{ .Values.seedClient.config.proxy.server.port }}
name: http-proxy
appProtocol: http
protocol: TCP
targetPort: {{ .Values.seedClient.config.proxy.server.port }}
- port: {{ .Values.seedClient.config.health.server.port }}
name: http-health
appProtocol: http
protocol: TCP
targetPort: {{ .Values.seedClient.config.health.server.port }}
- port: {{ .Values.seedClient.config.stats.server.port }}
name: http-stats
appProtocol: http
protocol: TCP
targetPort: {{ .Values.seedClient.config.stats.server.port }}
{{- if eq .Values.seedClient.service.type "NodePort" }}
nodePort: {{ .Values.seedClient.service.nodePort }}
{{- end }}
selector:
app: {{ template "dragonfly.fullname" . }}
component: {{ .Values.seedClient.name }}
{{- end }}

View File

@ -1,31 +0,0 @@
{{- if and .Values.seedClient.metrics.enable .Values.seedClient.metrics.serviceMonitor.enable }}
# Prometheus Operator ServiceMonitor for the seed client. Scrapes the
# "http-metrics" port of a Service labeled "<seedClient.name>-metrics"
# (a separate metrics Service, not the main seed-client Service).
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "dragonfly.seedClient.fullname" . }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels:
    app: {{ template "dragonfly.name" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    component: {{ .Values.seedClient.name }}
    {{- if .Values.seedClient.metrics.serviceMonitor.additionalLabels }}
{{ toYaml .Values.seedClient.metrics.serviceMonitor.additionalLabels | indent 4 }}
    {{- end }}
spec:
  endpoints:
    - port: http-metrics
      {{- if .Values.seedClient.metrics.serviceMonitor.interval }}
      interval: {{ .Values.seedClient.metrics.serviceMonitor.interval }}
      {{- end }}
      {{- if .Values.seedClient.metrics.serviceMonitor.scrapeTimeout }}
      scrapeTimeout: {{ .Values.seedClient.metrics.serviceMonitor.scrapeTimeout }}
      {{- end }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  selector:
    matchLabels:
      app: {{ template "dragonfly.name" . }}
      component: {{ .Values.seedClient.name }}-metrics
{{- end }}

File diff suppressed because it is too large Load Diff

View File

@ -1,25 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# Helm Docs
README.md.gotmpl

View File

@ -1,42 +0,0 @@
apiVersion: v2
name: nydus-snapshotter
description: Nydus snapshotter is an external plugin of containerd for Nydus image service, which implements a chunk-based content-addressable filesystem on top of a format called RAFS.
icon: https://github.com/dragonflyoss/image-service/raw/master/misc/logo.svg
type: application
version: 0.0.10
appVersion: 0.9.0
keywords:
- nydus
- nydus-snapshotter
- dragonfly
- d7y
- P2P
- image
maintainers:
- name: adamqqqplay
email: adamqqq@163.com
- name: imeoer
email: imeoer@gmail.com
- name: liubin
email: lb203159@antgroup.com
- name: gaius-qi
email: gaius.qi@gmail.com
home: https://nydus.dev/
sources:
- https://github.com/dragonflyoss/image-service
- https://github.com/containerd/nydus-snapshotter/
annotations:
artifacthub.io/changes: |
- Change default port of the Dragonfly.
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/dragonflyoss/helm-charts
- name: Source
url: https://github.com/containerd/nydus-snapshotter
artifacthub.io/images: |
- name: nydus-snapshotter
image: ghcr.io/nydus-snapshotter/nydus-snapshotter:latest

View File

@ -1,98 +0,0 @@
# Nydus-snapshotter Helm Chart
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/dragonfly)](https://artifacthub.io/packages/search?repo=dragonfly)
## TL;DR
```shell
helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace nydus-snapshotter nydus-snapshotter dragonfly/nydus-snapshotter
```
## Introduction
Nydus snapshotter is an external plugin of containerd for [Nydus image service](https://nydus.dev) which implements a chunk-based content-addressable filesystem on top of a format called `RAFS (Registry Acceleration File System)` that improves the current OCI image specification in terms of container launching speed, image space, and network bandwidth efficiency, as well as data integrity, with several runtime backends: FUSE, virtiofs and in-kernel [EROFS](https://www.kernel.org/doc/html/latest/filesystems/erofs.html).
Nydus supports a lazy pulling feature, since pulling an image is one of the most time-consuming steps in the container lifecycle. Lazy pulling here means a container can run even if the image is only partially available, with necessary chunks of the image fetched on demand. Apart from that, Nydus also supports [(e)Stargz](https://github.com/containerd/stargz-snapshotter) lazy pulling directly **WITHOUT** any explicit conversion.
For more details about how to build Nydus container image, please refer to [nydusify](https://github.com/dragonflyoss/image-service/blob/master/docs/nydusify.md) conversion tool and [acceld](https://github.com/goharbor/acceleration-service).
## Prerequisites
- Kubernetes cluster 1.20+
- Helm v3.8.0+
## Installation Guide
More details about installation are available in the [nydus-snapshotter repo](https://github.com/containerd/nydus-snapshotter).
## Installation
### Install with custom configuration
Create the `values.yaml` configuration file.
```yaml
nydusSnapshotter:
name: nydus-snapshotter
image: ghcr.io/containerd/nydus-snapshotter
tag: v0.13.4
```
Install nydus-snapshotter chart with release name `nydus-snapshotter`:
```shell
helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace nydus-snapshotter nydus-snapshotter dragonfly/nydus-snapshotter -f values.yaml
```
## Uninstall
Uninstall the `nydus-snapshotter` daemonset:
```shell
helm delete nydus-snapshotter --namespace nydus-snapshotter
```
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| args | list | `[]` | Args to overwrite default nydus-snapshotter startup command |
| containerRuntime | object | `{"containerd":{"configFile":"/etc/containerd/config.toml","enable":true},"initContainer":{"image":{"pullPolicy":"Always","registry":"ghcr.io","repository":"liubin/toml-cli","tag":"v0.0.7"}}}` | [Experimental] Container runtime support Choose special container runtime in Kubernetes. Support: Containerd, Docker, CRI-O |
| containerRuntime.containerd | object | `{"configFile":"/etc/containerd/config.toml","enable":true}` | [Experimental] Containerd support |
| containerRuntime.containerd.configFile | string | `"/etc/containerd/config.toml"` | Custom config path directory, default is /etc/containerd/config.toml |
| containerRuntime.containerd.enable | bool | `true` | Enable containerd support Inject nydus-snapshotter config into ${containerRuntime.containerd.configFile}, |
| containerRuntime.initContainer.image.pullPolicy | string | `"Always"` | Image pull policy. |
| containerRuntime.initContainer.image.registry | string | `"ghcr.io"` | Image registry. |
| containerRuntime.initContainer.image.repository | string | `"liubin/toml-cli"` | Image repository. |
| containerRuntime.initContainer.image.tag | string | `"v0.0.7"` | Image tag. |
| daemonsetAnnotations | object | `{}` | Daemonset annotations |
| dragonfly.enable | bool | `true` | Enable dragonfly |
| dragonfly.mirrorConfig[0].auth_through | bool | `false` | |
| dragonfly.mirrorConfig[0].headers.X-Dragonfly-Registry | string | `"https://index.docker.io"` | |
| dragonfly.mirrorConfig[0].host | string | `"http://127.0.0.1:4001"` | |
| dragonfly.mirrorConfig[0].ping_url | string | `"http://127.0.0.1:4003/healthy"` | |
| global.imagePullSecrets | list | `[]` | Global Docker registry secret names as an array. |
| global.imageRegistry | string | `""` | Global Docker image registry. |
| global.nodeSelector | object | `{}` | Global node labels for pod assignment. |
| hostAliases | list | `[]` | Host Aliases |
| hostNetwork | bool | `true` | Let nydus-snapshotter run in host network |
| hostPid | bool | `true` | Let nydus-snapshotter use the host's pid namespace |
| image.pullPolicy | string | `"Always"` | Image pull policy. |
| image.pullSecrets | list | `[]` (defaults to global.imagePullSecrets). | Image pull secrets. |
| image.registry | string | `"ghcr.io"` | Image registry. |
| image.repository | string | `"containerd/nydus-snapshotter"` | Image repository. |
| image.tag | string | `"v0.9.0"` | Image tag. |
| name | string | `"nydus-snapshotter"` | nydus-snapshotter name |
| nodeSelector | object | `{}` | Node labels for pod assignment |
| podAnnotations | object | `{}` | Pod annotations |
| podLabels | object | `{}` | Pod labels |
| priorityClassName | string | `""` | Pod priorityClassName |
| resources | object | `{"limits":{"cpu":"2","memory":"2Gi"},"requests":{"cpu":"0","memory":"0"}}` | Pod resource requests and limits |
| terminationGracePeriodSeconds | string | `nil` | Pod terminationGracePeriodSeconds |
| tolerations | list | `[]` | List of node taints to tolerate |
## Chart dependencies
| Repository | Name | Version |
|------------|------|---------|

View File

@ -1,61 +0,0 @@
# Nydus-snapshotter Helm Chart
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/dragonfly)](https://artifacthub.io/packages/search?repo=dragonfly)
## TL;DR
```shell
helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace nydus-snapshotter nydus-snapshotter dragonfly/nydus-snapshotter
```
## Introduction
Nydus snapshotter is an external plugin of containerd for [Nydus image service](https://nydus.dev) which implements a chunk-based content-addressable filesystem on top of a format called `RAFS (Registry Acceleration File System)` that improves the current OCI image specification in terms of container launching speed, image space, and network bandwidth efficiency, as well as data integrity, with several runtime backends: FUSE, virtiofs and in-kernel [EROFS](https://www.kernel.org/doc/html/latest/filesystems/erofs.html).
Nydus supports a lazy pulling feature, since pulling an image is one of the most time-consuming steps in the container lifecycle. Lazy pulling here means a container can run even if the image is only partially available, with necessary chunks of the image fetched on demand. Apart from that, Nydus also supports [(e)Stargz](https://github.com/containerd/stargz-snapshotter) lazy pulling directly **WITHOUT** any explicit conversion.
For more details about how to build Nydus container image, please refer to [nydusify](https://github.com/dragonflyoss/image-service/blob/master/docs/nydusify.md) conversion tool and [acceld](https://github.com/goharbor/acceleration-service).
## Prerequisites
- Kubernetes cluster 1.20+
- Helm v3.8.0+
## Installation Guide
More details about installation are available in the [nydus-snapshotter repo](https://github.com/containerd/nydus-snapshotter).
## Installation
### Install with custom configuration
Create the `values.yaml` configuration file.
```yaml
nydusSnapshotter:
name: nydus-snapshotter
image: ghcr.io/containerd/nydus-snapshotter
tag: v0.13.4
```
Install nydus-snapshotter chart with release name `nydus-snapshotter`:
```shell
helm repo add dragonfly https://dragonflyoss.github.io/helm-charts/
helm install --create-namespace --namespace nydus-snapshotter nydus-snapshotter dragonfly/nydus-snapshotter -f values.yaml
```
## Uninstall
Uninstall the `nydus-snapshotter` daemonset:
```shell
helm delete nydus-snapshotter --namespace nydus-snapshotter
```
{{ template "chart.valuesSection" . }}
## Chart dependencies
{{ template "chart.requirementsTable" . }}

View File

@ -1,8 +0,0 @@
Thank you for installing {{ .Chart.Name }}.
Your release is named {{ .Release.Name }}.
To learn more about the release, try:
$ helm status {{ .Release.Name }}
$ helm get all {{ .Release.Name }}

View File

@ -1,60 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
NOTE(review): `default` is called with no fallback argument, so this always
yields .Chart.Name — presumably a nameOverride argument was dropped; confirm
against the upstream chart.
*/}}
{{- define "nydus-snapshotter.name" -}}
{{- default .Chart.Name -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If the release name already contains the chart name, the release name alone is used.
*/}}
{{- define "nydus-snapshotter.fullname" -}}
{{- $name := default .Chart.Name -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper image name, honoring an optional global registry override.
When a digest is supplied it replaces the ":tag" suffix with "@<digest>".
{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
*/}}
{{- define "common.images.image" -}}
{{- $registryName := .imageRoot.registry -}}
{{- $repositoryName := .imageRoot.repository -}}
{{- $separator := ":" -}}
{{- $termination := .imageRoot.tag | toString -}}
{{- if .global }}
{{- if .global.imageRegistry }}
{{- $registryName = .global.imageRegistry -}}
{{- end -}}
{{- end -}}
{{- if .imageRoot.digest }}
{{- $separator = "@" -}}
{{- $termination = .imageRoot.digest | toString -}}
{{- end -}}
{{- if $registryName }}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- else -}}
{{- printf "%s%s%s" $repositoryName $separator $termination -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper image name (for the main nydus-snapshotter container)
*/}}
{{- define "nydus-snapshotter.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.image "global" .Values.global ) -}}
{{- end -}}
{{/*
Return the proper image name (for the containerd-config init container)
*/}}
{{- define "nydus-snapshotter.initContainer.image" -}}
{{- include "common.images.image" ( dict "imageRoot" .Values.containerRuntime.initContainer.image "global" .Values.global ) -}}
{{- end -}}

View File

@ -1,43 +0,0 @@
# ConfigMap providing nydusd's runtime configuration (config.json): a registry
# storage backend with optional Dragonfly mirrors, a local blob cache, and
# filesystem prefetch settings. Mounted at /etc/nydus/ by the DaemonSet.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "nydus-snapshotter.fullname" . }}
  labels:
    app: {{ template "nydus-snapshotter.fullname" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    component: {{ .Values.name }}
data:
  # The block scalar below is JSON; do not add YAML comments inside it —
  # they would become part of the payload.
  config.json: |-
    {
      "device": {
        "backend": {
          "type": "registry",
          "config": {
            {{- if .Values.dragonfly.enable }}
            "mirrors": {{ mustToJson .Values.dragonfly.mirrorConfig }},
            {{- end }}
            "timeout": 5,
            "connect_timeout": 5,
            "retry_limit": 2
          }
        },
        "cache": {
          "type": "blobcache",
          "config": {
            "work_dir": "/var/lib/nydus/cache/"
          }
        }
      },
      "mode": "direct",
      "digest_validate": false,
      "iostats_files": false,
      "enable_xattr": true,
      "fs_prefetch": {
        "enable": true,
        "threads_count": 8,
        "merging_size": 1048576,
        "prefetch_all": true
      }
    }

View File

@ -1,158 +0,0 @@
# DaemonSet running nydus-snapshotter on every node. The container is
# privileged and uses bidirectional mount propagation so FUSE mounts created
# inside the pod are visible to containerd on the host.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ template "nydus-snapshotter.fullname" . }}
  labels:
    app: {{ template "nydus-snapshotter.fullname" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    component: "{{ .Values.name }}"
  annotations:
  {{- if .Values.daemonsetAnnotations }}
{{ toYaml .Values.daemonsetAnnotations | indent 4 }}
  {{- end }}
spec:
  selector:
    matchLabels:
      app: {{ template "nydus-snapshotter.fullname" . }}
      component: "{{ .Values.name }}"
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ template "nydus-snapshotter.fullname" . }}
        component: "{{ .Values.name }}"
        release: {{ .Release.Name }}
        {{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | indent 8 }}
        {{- end }}
      {{- if .Values.podAnnotations }}
      annotations:
{{ toYaml .Values.podAnnotations | indent 8 }}
      {{- end }}
    spec:
      serviceAccountName: {{template "nydus-snapshotter.fullname" . }}-sa
      {{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
      hostNetwork: {{ .Values.hostNetwork }}
      # hostPID is needed by the postStart hook below, which nsenters PID 1.
      hostPID: {{ .Values.hostPid }}
      {{- if .Values.hostNetwork }}
      dnsPolicy: "ClusterFirstWithHostNet"
      {{- end }}
      {{- if .Values.tolerations }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
      {{- end }}
      # sprig `quote` renders nil as an empty string, so this guard skips the
      # field when the value is unset while still admitting an explicit 0.
      {{- if quote .Values.terminationGracePeriodSeconds }}
      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
      {{- end }}
      {{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.priorityClassName) }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- end }}
      {{- with .Values.image.pullSecrets | default .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if .Values.hostAliases }}
      hostAliases:
{{ toYaml .Values.hostAliases | indent 8 }}
      {{- end }}
      containers:
        - name: nydus-snapshotter
          image: {{ template "nydus-snapshotter.image" . }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: ENABLE_NYDUS_OVERLAY
              value: "false"
          resources:
{{ toYaml .Values.resources | indent 12 }}
          {{- if .Values.containerRuntime.containerd.enable }}
          # Restart the host's containerd after startup so it picks up the
          # proxy-plugin config written by the init container below.
          lifecycle:
            postStart:
              exec:
                command:
                  - "/bin/sh"
                  - "-c"
                  - |
                    # currently, without host pid in container, we can not nsenter with pid and can not invoke systemctl correctly.
                    nsenter -t 1 -m systemctl -- restart containerd.service
          {{- end }}
          {{- if .Values.args }}
          args:
{{ toYaml .Values.args | indent 12 }}
          {{- end }}
          volumeMounts:
            - name: config
              mountPath: "/etc/nydus/"
            - name: nydus-lib
              mountPath: "/var/lib/containerd-nydus"
              mountPropagation: Bidirectional
            - name: nydus-run
              mountPath: "/run/containerd-nydus"
              mountPropagation: Bidirectional
            - name: fuse
              mountPath: "/dev/fuse"
          securityContext:
            privileged: true
      initContainers:
        {{- if .Values.containerRuntime.containerd.enable }}
        # Rewrites the host's containerd config to register the nydus
        # snapshotter proxy plugin; idempotent via the `toml check` probe.
        - name: update-containerd
          image: {{ template "nydus-snapshotter.initContainer.image" . }}
          imagePullPolicy: {{ .Values.containerRuntime.initContainer.image.pullPolicy }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
          command:
            - /bin/sh
            - -cx
            - |-
              etcContainerd={{ .Values.containerRuntime.containerd.configFile }}
              toml check $etcContainerd proxy_plugins.nydus
              if [ $? -eq 0 ]; then
                echo "nydus snapshotter has already configured."
                exit 0
              fi
              toml set --overwrite $etcContainerd plugins.\"io.containerd.grpc.v1.cri\".containerd.discard_unpacked_layers false
              toml set --overwrite $etcContainerd plugins.\"io.containerd.grpc.v1.cri\".containerd.disable_snapshot_annotations false
              toml set --overwrite $etcContainerd plugins.\"io.containerd.grpc.v1.cri\".containerd.snapshotter nydus
              # toml command not support to set block, so just use cat command.
              cat << EOF >> $etcContainerd
              [proxy_plugins]
                [proxy_plugins.nydus]
                  type = "snapshot"
                  address = "/run/containerd-nydus/containerd-nydus-grpc.sock"
              EOF
          volumeMounts:
            - name: containerd-conf
              mountPath: {{ .Values.containerRuntime.containerd.configFile }}
        {{- end }}
      volumes:
        - name: config
          configMap:
            name: {{ template "nydus-snapshotter.fullname" . }}
        - name: nydus-run
          hostPath:
            path: /run/containerd-nydus
            type: DirectoryOrCreate
        - name: nydus-lib
          hostPath:
            path: /var/lib/containerd-nydus
            type: DirectoryOrCreate
        - name: fuse
          hostPath:
            path: /dev/fuse
        {{- if .Values.containerRuntime.containerd.enable }}
        - name: containerd-conf
          hostPath:
            path: {{ .Values.containerRuntime.containerd.configFile }}
        {{- end }}

View File

@ -1,37 +0,0 @@
# ServiceAccount plus cluster-wide RBAC for the snapshotter. The role grants
# read-only access to Secrets in all namespaces — presumably to fetch registry
# credentials for lazy image pulls; confirm before tightening the scope.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{template "nydus-snapshotter.fullname" . }}-sa
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ template "nydus-snapshotter.fullname" . }}-role
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ template "nydus-snapshotter.fullname" . }}-role-binding
roleRef:
  kind: ClusterRole
  name: {{ template "nydus-snapshotter.fullname" . }}-role
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ template "nydus-snapshotter.fullname" . }}-sa
    namespace: {{ .Release.Namespace }}

View File

@ -1,88 +0,0 @@
# nydus-snapshotter Helm Chart Values
#
# Comments in the `# --` form are consumed by helm-docs to generate the
# values table in README.md; keep that format when editing.
global:
  # -- Global Docker image registry.
  imageRegistry: ""
  # -- Global Docker registry secret names as an array.
  imagePullSecrets: []
  # -- Global node labels for pod assignment.
  nodeSelector: {}

# -- nydus-snapshotter name
name: nydus-snapshotter

image:
  # -- Image registry.
  registry: ghcr.io
  # -- Image repository.
  repository: containerd/nydus-snapshotter
  # -- Image tag.
  tag: v0.9.0
  # -- Image pull policy.
  pullPolicy: Always
  # -- Image pull secrets.
  # @default -- `[]` (defaults to global.imagePullSecrets).
  pullSecrets: []

# -- Let nydus-snapshotter run in host network
hostNetwork: true

# -- Let nydus-snapshotter use the host's pid namespace
# (used by the DaemonSet's postStart hook to nsenter PID 1)
hostPid: true

# -- Host Aliases
hostAliases: []

# -- Args to overwrite default nydus-snapshotter startup command
args: []

# -- Pod resource requests and limits
resources:
  requests:
    cpu: "0"
    memory: "0"
  limits:
    cpu: "2"
    memory: "2Gi"

# -- Pod priorityClassName
priorityClassName: ""

# -- Node labels for pod assignment
nodeSelector: {}

# -- Pod terminationGracePeriodSeconds
# Left empty so the Kubernetes default applies.
terminationGracePeriodSeconds:

# -- List of node taints to tolerate
tolerations: []

# -- Pod annotations
podAnnotations: {}

# -- Pod labels
podLabels: {}

# -- Daemonset annotations
daemonsetAnnotations: {}

dragonfly:
  # -- Enable dragonfly
  enable: true
  # Registry mirror entries injected verbatim into nydusd's config.json
  # ("mirrors" key); schema is nydusd's mirror configuration — confirm
  # field names against the nydus documentation before changing.
  mirrorConfig:
    - host: "http://127.0.0.1:4001"
      auth_through: false
      headers:
        "X-Dragonfly-Registry": "https://index.docker.io"
      ping_url: "http://127.0.0.1:4003/healthy"

# -- [Experimental] Container runtime support
# Choose special container runtime in Kubernetes.
# Support: Containerd, Docker, CRI-O
containerRuntime:
  initContainer:
    image:
      # -- Image registry.
      registry: ghcr.io
      # -- Image repository.
      repository: liubin/toml-cli
      # -- Image tag.
      tag: v0.0.7
      # -- Image pull policy.
      pullPolicy: Always
  # -- [Experimental] Containerd support
  containerd:
    # -- Enable containerd support
    # Inject nydus-snapshotter config into ${containerRuntime.containerd.configFile},
    enable: true
    # -- Custom config path directory, default is /etc/containerd/config.toml
    configFile: "/etc/containerd/config.toml"