feat: fix docker-compose (#1087)

upgrade to v2.0.2-rc.9

added depends_on (redis and mysql)

added healthchecks

Signed-off-by: 赵安家 <anjia0532@gmail.com>
AnJia 2022-02-22 19:42:12 +08:00 committed by Gaius
parent 34abb8dbbd
commit 1c9ffcd938
8 changed files with 665 additions and 88 deletions


@@ -1,3 +1,6 @@
cdn.json
cdn.yaml
dfget.yaml
manager.yaml
scheduler.yaml


@@ -1,19 +1,130 @@
# server scheduler instance configuration
server:
# # ip
ip: 0.0.0.0
# # host
# host: localhost
# port is the port the scheduler server listens on.
port: 8002
# limit the number of requests
listenLimit: 10000
# cacheDir is dynconfig cache storage directory
# in linux, default value is /var/cache/dragonfly
# in macos(just for testing), default value is /Users/$USER/.dragonfly/cache
cacheDir: ""
# logDir is the log storage directory
# in linux, default value is /var/log/dragonfly
# in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
logDir: ""
# scheduler policy configuration
scheduler:
# algorithm configuration to use different scheduling algorithms,
# default configuration supports "default" and "ml"
# "default" is the rule-based scheduling algorithm,
# "ml" is the machine learning scheduling algorithm
# It also supports user plugin extension, the algorithm value is "plugin",
# and the compiled `d7y-scheduler-plugin-evaluator.so` file is added to
# the dragonfly working directory plugins
algorithm: default
# backSourceCount is the number of backsource clients
# when the CDN is unavailable
backSourceCount: 3
# retry scheduling back-to-source limit times
retryBackSourceLimit: 5
# retry scheduling limit times
retryLimit: 20
# retry scheduling interval
retryInterval: 200ms
# gc metadata configuration
gc:
# peerGCInterval is peer's gc interval
peerGCInterval: 10m
# peerTTL is peer's TTL duration
peerTTL: 24h
# taskGCInterval is task's gc interval
taskGCInterval: 10m
# taskTTL is task's TTL duration
taskTTL: 24h
# hostGCInterval is host's gc interval
hostGCInterval: 30m
# hostTTL is host's TTL duration
hostTTL: 48h
# dynamic data configuration
dynConfig:
# dynamic config refresh interval
refreshInterval: 1m
# scheduler host configuration
host:
# idc is the idc of scheduler instance
idc: ""
# netTopology is the net topology of scheduler instance
netTopology: ""
# location is the location of scheduler instance
location: ""
# manager configuration
manager:
# addr manager access address
addr: "172.18.191.244:65003"
# schedulerClusterID cluster id to which scheduler instance belongs
schedulerClusterID: "1"
# keepAlive keep alive configuration
keepAlive:
# interval
interval: 5s
# cdn configuration
cdn:
# scheduler enables cdn as a P2P peer;
# if the value is false, the P2P network will not back-to-source through
# cdn but through dfdaemon, and the preheat feature does not work
enable: true
# machinery async job configuration,
# see https://github.com/RichardKnop/machinery
job:
# scheduler enable job service
enable: false
# number of workers in global queue
globalWorkerNum: 1
# number of workers in scheduler queue
schedulerWorkerNum: 1
# number of workers in local queue
localWorkerNum: 5
# redis configuration
redis:
# host
host: "172.18.191.244"
# port
port: 6379
# password
password: ""
# brokerDB
brokerDB: 1
# backendDB
backendDB: 2
# enable prometheus metrics
metrics:
# scheduler enable metrics service
enable: false
# metrics service address
addr: ":8000"
# enable peer host metrics
enablePeerHost: false
# console shows log on console
console: true
# whether to enable debug level logger and enable pprof
verbose: true
# listen port for pprof, only valid when the verbose option is true
# default is -1. If it is 0, pprof will use a random port.
pprof-port: -1
# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
jaeger: ""
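The scheduler above registers with the manager at 172.18.191.244:65003 and uses the same host for redis, so a quick pre-flight check from the docker host can save a failed start. This is only an illustrative probe in the same nc style the compose healthchecks use, not part of the commit:

nc -z 172.18.191.244 65003 && echo "manager grpc port reachable"
nc -z 172.18.191.244 6379 && echo "redis reachable"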


@@ -1,31 +1,94 @@
version: "3.9" version: "3"
services: services:
redis:
image: redis:6-alpine
container_name: redis
network_mode: host
command: >
--requirepass dragonfly
healthcheck:
test: ["CMD", "redis-cli","-a","dragonfly","ping"]
interval: 1s
timeout: 3s
retries: 30
mysql:
image: mariadb:10.6
container_name: mysql
network_mode: host
environment:
- MARIADB_USER=dragonfly
- MARIADB_PASSWORD=dragonfly
- MARIADB_DATABASE=manager
- MARIADB_ALLOW_EMPTY_ROOT_PASSWORD=yes
healthcheck:
test: ["CMD-SHELL", "mysqladmin status"]
interval: 1s
timeout: 3s
retries: 30
manager:
image: dragonflyoss/manager:v2.0.2-rc.9
container_name: manager
network_mode: host
depends_on:
- redis
- mysql
restart: always
volumes:
- /tmp/log/dragonfly:/var/log/dragonfly
- ./config/manager.yaml:/etc/dragonfly/manager.yaml
healthcheck:
test: ["CMD-SHELL", "nc -z 127.0.0.1 8080 || exit 1"]
interval: 1s
timeout: 3s
retries: 30
dfdaemon:
image: dragonflyoss/dfdaemon:v2.0.2-rc.9
depends_on:
- scheduler
- cdn
container_name: dfdaemon
network_mode: host
restart: always
healthcheck:
test: ["CMD-SHELL", "nc -z 127.0.0.1 65001 || exit 1"]
interval: 1s
timeout: 3s
retries: 30
volumes:
- /tmp/log/dragonfly:/var/log/dragonfly
- ./config/dfget.yaml:/etc/dragonfly/dfget.yaml
scheduler:
image: dragonflyoss/scheduler:v2.0.2-rc.9
depends_on:
- cdn
- manager
container_name: scheduler
network_mode: host
restart: always
healthcheck:
test: ["CMD-SHELL", "nc -z 127.0.0.1 8002 || exit 1"]
interval: 1s
timeout: 3s
retries: 30
volumes:
- /tmp/log/dragonfly:/var/log/dragonfly
- ./config/scheduler.yaml:/etc/dragonfly/scheduler.yaml
- ./config/cdn.json:/opt/dragonfly/scheduler-cdn/cdn.json
cdn:
image: dragonflyoss/cdn:v2.0.2-rc.9
container_name: cdn
network_mode: host
restart: always
healthcheck:
test: ["CMD-SHELL", "nc -z 127.0.0.1 8001 || exit 1"]
interval: 1s
timeout: 3s
retries: 30
volumes:
- /tmp/log/dragonfly:/var/log/dragonfly
- ./config/cdn.yaml:/etc/dragonfly/cdn.yaml
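The healthchecks above also make it possible to gate startup ordering on dependency health rather than on container creation. A hedged sketch, assuming a compose file format that supports the long depends_on syntax (the Compose Specification or the 2.x formats; plain version "3" files ignore conditions, which is why run.sh polls instead):

manager:
  image: dragonflyoss/manager:v2.0.2-rc.9
  depends_on:
    redis:
      condition: service_healthy
    mysql:
      condition: service_healthy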


@@ -14,6 +14,8 @@ prepare(){
cat template/cdn.template.json > config/cdn.json
cat template/cdn.template.yaml > config/cdn.yaml
cat template/dfget.template.yaml > config/dfget.yaml
cat template/scheduler.template.yaml > config/scheduler.yaml
cat template/manager.template.yaml > config/manager.yaml
ip=${IP:-$(hostname -i)}
hostname=$(hostname)
@@ -21,6 +23,8 @@ prepare(){
sed -i "s,__IP__,$ip," config/cdn.json
sed -i "s,__IP__,$ip," config/dfget.yaml
sed -i "s,__IP__,$ip," config/cdn.yaml
sed -i "s,__IP__,$ip," config/scheduler.yaml
sed -i "s,__IP__,$ip," config/manager.yaml
sed -i "s,__HOSTNAME__,$hostname," config/cdn.json sed -i "s,__HOSTNAME__,$hostname," config/cdn.json
} }
@@ -62,7 +66,24 @@ case "$1" in
*)
if [ -z "$1" ]; then
# start all services defined in docker-compose
docker-compose up -d
# docker-compose file version 3 depends_on does not wait for redis and mysql to be "ready" before starting manager ...
# doc https://docs.docker.com/compose/compose-file/compose-file-v3/#depends_on
for i in $(seq 0 10); do
service_num=$(docker-compose ps --services |wc -l)
ready_num=$(docker-compose ps | grep healthy | wc -l)
if [ "$service_num" -eq "$ready_num" ]; then
break
fi
echo "wait for all service ready: $ready_num/$service_num,$i times check"
sleep 2
done
# print service list info
docker-compose ps
exit 0
fi
echo "unknown argument: $1"


@@ -5,7 +5,6 @@ base:
# listenPort is the port cdn server listens on.
# default: 8003
listenPort: 8003
# DownloadPort is the port for download files from cdn.
# And you should start a file server firstly which listens on the download port.
@@ -17,12 +16,12 @@ base:
systemReservedBandwidth: 20M
# MaxBandwidth is the network bandwidth that cdn can use.
# default: 1G, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
maxBandwidth: 1G
# AdvertiseIP is used to set the ip that we advertise to other peers in the p2p-network.
# By default, the first non-loopback address is advertised.
advertiseIP: __IP__
# FailAccessInterval is the interval time after failing to access the URL.
# If a task failed to be downloaded from the source, it will not be retried in the time since the last failure.
@@ -37,20 +36,37 @@ base:
# default: 2m0s
gcMetaInterval: 2m
# gcStorageInterval is the interval time to execute GC storage.
# default: 15s
gcStorageInterval: 15s
# TaskExpireTime when a task is not accessed within the taskExpireTime,
# it will be treated as expired.
# default: 3m0s
taskExpireTime: 3m
# storageMode is the mode of storage policy, [disk/hybrid]
storageMode: disk
# logDir is the log storage directory
# in linux, default value is /var/log/dragonfly
# in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
logDir: ""
# manager configuration
manager:
addr: "__IP__:65003"
cdnClusterID: "1"
keepAlive:
interval: 5s
# host configuration
host:
location:
idc:
# enable prometheus metrics
# metrics:
# # metrics service address
# addr: ":8000"
# Console shows log on console
console: false
plugins:
storageDriver:
@@ -58,16 +74,52 @@ plugins:
enable: true
config:
baseDir: /tmp/cdn
storagemanager:
- name: disk
enable: true
config:
gcInitialDelay: 0s
gcInterval: 15s
driverConfigs:
disk:
gcConfig:
youngGCThreshold: 100.0GB
fullGCThreshold: 5.0GB
cleanRatio: 1
intervalThreshold: 2h0m0s
- name: hybrid
enable: false
config:
gcInitialDelay: 0s
gcInterval: 15s
driverConfigs:
disk:
gcConfig:
youngGCThreshold: 100.0GB
fullGCThreshold: 5.0GB
cleanRatio: 1
intervalThreshold: 2h0m0s
memory:
gcConfig:
youngGCThreshold: 100.0GB
fullGCThreshold: 5.0GB
cleanRatio: 3
intervalThreshold: 2h0m0s
# console shows log on console
console: false
# whether to enable debug level logger and enable pprof
verbose: false
# listen port for pprof, only valid when the verbose option is true
# default is -1. If it is 0, pprof will use a random port.
pprof-port: -1
# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
jaeger: ""
# service name used in tracer
# default: dragonfly-cdn
service-name: dragonfly-cdn
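A quick way to confirm a CDN started with this config is to probe its ports the way the compose healthcheck does; 8003 is the listenPort above, and 8001 is assumed here to be the download port that the compose healthcheck targets:

nc -z 127.0.0.1 8003 && echo "cdn grpc port open"
nc -z 127.0.0.1 8001 && echo "cdn download port open"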


@@ -1,28 +1,70 @@
# daemon alive time, when set to 0s, daemon will not auto exit
# it is useful for long-running daemons
aliveTime: 0s
# daemon gc task running interval
gcInterval: 1m0s
# daemon work directory, daemon will change current working directory to this
# in linux, default value is /usr/local/dragonfly
# in macos(just for testing), default value is /Users/$USER/.dragonfly
workHome: ""
# cacheDir is dynconfig cache storage directory
# in linux, default value is /var/cache/dragonfly
# in macos(just for testing), default value is /Users/$USER/.dragonfly/cache
cacheDir: ""
# logDir is the log storage directory
# in linux, default value is /var/log/dragonfly
# in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
logDir: ""
# dataDir is the download data storage directory
# in linux, default value is /var/lib/dragonfly
# in macos(just for testing), default value is /Users/$USER/.dragonfly/data
dataDir: ""
# when daemon exits, keep peer task data or not
# it is useful when upgrading the daemon service, all local cache will be saved
# default is false
keepStorage: true
# console shows log on console
console: false
# whether to enable debug level logger and enable pprof
verbose: false
# listen port for pprof, only valid when the verbose option is true
# default is -1. If it is 0, pprof will use a random port.
pprof-port: -1
# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
jaeger: ""
# all addresses of all schedulers
# the schedulers of all daemons should be the same in one region or zone.
# daemon will send tasks to a fixed scheduler by hashing the task url and metadata
# caution: only tcp is supported
scheduler:
manager:
# get scheduler list dynamically from manager
enable: false
# manager service addresses
netAddrs:
- type: tcp
addr: __IP__:65003
# scheduler list refresh interval
refreshInterval: 5m
# schedule timeout
scheduleTimeout: 30s
# when true, the daemon can back-source only when the scheduler tells it to
disableAutoBackSource: false
# below example is a static address
netAddrs:
- type: tcp
addr: __IP__:8002
# current host info used for scheduler
host:
@@ -32,17 +74,43 @@ host:
# access ip for other peers
# when local ip is different from access ip, advertiseIP should be set
advertiseIP: __IP__
# geographical location, separated by "|" characters
location: ""
# idc deployed by daemon
idc: ""
# security domain deployed by daemon, network isolation between different security domains
securityDomain: ""
# network topology, separated by "|" characters
netTopology: ""
# daemon hostname
# hostname: ""
# download service option
download:
# calculate digest when transfer files, set false to save memory
calculateDigest: true
# total download limit per second
totalRateLimit: 200Mi
# per peer task download limit per second
perPeerRateLimit: 100Mi
# download piece timeout
pieceDownloadTimeout: 30s
# golang transport option
transportOption:
# dial timeout
dialTimeout: 2s
# keep alive
keepAlive: 30s
# same with http.Transport.MaxIdleConns
maxIdleConns: 100
# same with http.Transport.IdleConnTimeout
idleConnTimeout: 90s
# same with http.Transport.ResponseHeaderTimeout
responseHeaderTimeout: 2s
# same with http.Transport.TLSHandshakeTimeout
tlsHandshakeTimeout: 1s
# same with http.Transport.ExpectContinueTimeout
expectContinueTimeout: 2s
# download grpc option
downloadGRPC:
# security option
@@ -55,6 +123,8 @@ download:
# download service listen address
# currently, only unix domain socket is supported
unixListen:
# in linux, default value is /var/run/dfdaemon.sock
# in macos(just for testing), default value is /tmp/dfdaemon.sock
socket: /var/run/dfdaemon.sock
# peer grpc option
# peer grpc service sends pieces info to other peers
@@ -75,14 +145,10 @@ download:
# start: 65000
# end: 65009
# upload service option
upload:
# upload limit per second
rateLimit: 100Mi
security:
insecure: true
cacert: ""
@@ -103,19 +169,42 @@ upload:
storage:
# task data expire time
# when there is no access to a task data, this task will be gc.
taskExpireTime: 6h
# storage strategy when processing task data
# io.d7y.storage.v2.simple : download file to data directory first, then copy to output path, this is the default action
# the downloaded file in data directory will be the peer data for uploading to other peers
# io.d7y.storage.v2.advance: download file directly to output path with postfix, hard link to final output,
# avoids copying to output path, faster than the simple strategy, but:
# the output file with postfix will be the peer data for uploading to other peers
# when the user deletes or changes this file, this peer data will be corrupted
# default is io.d7y.storage.v2.advance
strategy: io.d7y.storage.v2.advance
# disk quota gc threshold, when the quota of all tasks exceeds the gc threshold, the oldest tasks will be reclaimed.
diskGCThreshold: 50Gi
# disk used percent gc threshold, when the disk used percent exceeds, the oldest tasks will be reclaimed.
# eg, diskGCThresholdPercent=80, when the disk usage is above 80%, start to gc the oldest tasks
diskGCThresholdPercent: 80
# set to true for reusing underlying storage for same task id
multiplex: true
# proxy service config file location or detail config
# proxy: ""
# proxy service detail option
proxy:
# filter for hash url
# when defaultFilter: "Expires&Signature", for example:
# http://localhost/xyz?Expires=111&Signature=222 and http://localhost/xyz?Expires=333&Signature=999
# are the same task
defaultFilter: "Expires&Signature"
security:
insecure: true
cacert: ""
cert: ""
key: ""
tcpListen:
# namespace stands for the linux net namespace, like /proc/1/ns/net
# it's useful for running the daemon in a pod with its own ip allocated while listening on a port in the host net namespace
# Linux only
namespace: ""
# listen address
@@ -128,16 +217,51 @@ proxy:
# start: 65020
# end: 65029
registryMirror:
# when enabled, use the header "X-Dragonfly-Registry" for the remote instead of the url
dynamic: true
# url for the registry mirror
url: https://index.docker.io
# whether to ignore https certificate errors
insecure: true
# optional certificates if the remote server uses self-signed certificates
certs: []
# whether to request the remote registry directly
direct: false
# whether to use proxies to decide if dragonfly should be used
useProxies: false
proxies:
# proxy all http image layer download requests with dfget
- regx: blobs/sha256.*
# change http requests to some-registry to https and proxy them with dfget
- regx: some-registry/
useHTTPS: true
# proxy requests directly, without dfget
- regx: no-proxy-reg
direct: true
# proxy requests with redirect
- regx: some-registry
redirect: another-registry
# the same with url rewrite like apache ProxyPass directive
- regx: ^http://some-registry/(.*)
redirect: http://another-registry/$1
hijackHTTPS:
# key pair used to hijack https requests
cert: ""
key: ""
hosts:
- regx: mirror.aliyuncs.com:443 # regexp to match request hosts
# whether to ignore https certificate errors
insecure: true
# optional certificates if the host uses self-signed certificates
certs: []
# max tasks to download at the same time, 0 means no limit
maxConcurrency: 0
whiteList:
# the host of the whitelist
- host: ""
# match whitelist hosts
regx:
# ports that need to be added to the whitelist
ports:
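The registryMirror and proxies rules above are what route image-layer requests through the P2P network, and with dynamic: true a client can name the upstream registry per request via the X-Dragonfly-Registry header. A rough smoke test, assuming the dfdaemon proxy listens on 65001 (the port probed by the compose healthcheck); the port and image path are illustrative only:

curl -sI -H "X-Dragonfly-Registry: https://index.docker.io" \
  http://127.0.0.1:65001/v2/library/alpine/manifests/latest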


@@ -0,0 +1,73 @@
# current server info used for server
server:
# grpc server configure
grpc:
# listen address
listen: __IP__
# listen port, manager will try to listen
# when this port is not available, manager will try next port
port:
start: 65003
end: 65003
# rest server configure
rest:
# listen address
addr: :8080
# front-end console resource path
# publicPath: /dist
# database info used for server
database:
# mysql configure
mysql:
user: dragonfly
password: dragonfly
host: __IP__
port: 3306
dbname: manager
migrate: true
# tls:
# # client certificate file path
# cert: /etc/ssl/certs/cert.pem
# # client key file path
# key: /etc/ssl/private/key.pem
# # ca file path
# ca: /etc/ssl/certs/ca.pem
# # whether a client verifies the server's certificate chain and host name.
# insecureSkipVerify: true
# redis configure
redis:
password: dragonfly
host: __IP__
port: 6379
db: 0
# manager server cache
# cache:
# # redis cache configure
# redis:
# # cache ttl configure
# ttl: 30s
# # local cache configure
# local:
# # lfu cache size
# size: 10000
# # cache ttl configure
# ttl: 30s
# enable prometheus metrics
# metrics:
# # metrics service address
# addr: ":8000"
# console shows log on console
console: false
# whether to enable debug level logger and enable pprof
verbose: false
# listen port for pprof, only valid when the verbose option is true
# default is -1. If it is 0, pprof will use a random port.
pprof-port: -1
# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
jaeger: ""


@@ -0,0 +1,130 @@
# server scheduler instance configuration
server:
# # ip
ip: 0.0.0.0
# # host
# host: localhost
# port is the port the scheduler server listens on.
port: 8002
# limit the number of requests
listenLimit: 10000
# cacheDir is dynconfig cache storage directory
# in linux, default value is /var/cache/dragonfly
# in macos(just for testing), default value is /Users/$USER/.dragonfly/cache
cacheDir: ""
# logDir is the log storage directory
# in linux, default value is /var/log/dragonfly
# in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
logDir: ""
# scheduler policy configuration
scheduler:
# algorithm configuration to use different scheduling algorithms,
# default configuration supports "default" and "ml"
# "default" is the rule-based scheduling algorithm,
# "ml" is the machine learning scheduling algorithm
# It also supports user plugin extension, the algorithm value is "plugin",
# and the compiled `d7y-scheduler-plugin-evaluator.so` file is added to
# the dragonfly working directory plugins
algorithm: default
# backSourceCount is the number of backsource clients
# when the CDN is unavailable
backSourceCount: 3
# retry scheduling back-to-source limit times
retryBackSourceLimit: 5
# retry scheduling limit times
retryLimit: 20
# retry scheduling interval
retryInterval: 200ms
# gc metadata configuration
gc:
# peerGCInterval is peer's gc interval
peerGCInterval: 10m
# peerTTL is peer's TTL duration
peerTTL: 24h
# taskGCInterval is task's gc interval
taskGCInterval: 10m
# taskTTL is task's TTL duration
taskTTL: 24h
# hostGCInterval is host's gc interval
hostGCInterval: 30m
# hostTTL is host's TTL duration
hostTTL: 48h
# dynamic data configuration
dynConfig:
# dynamic config refresh interval
refreshInterval: 1m
# scheduler host configuration
host:
# idc is the idc of scheduler instance
idc: ""
# netTopology is the net topology of scheduler instance
netTopology: ""
# location is the location of scheduler instance
location: ""
# manager configuration
manager:
# addr manager access address
addr: "__IP__:65003"
# schedulerClusterID cluster id to which scheduler instance belongs
schedulerClusterID: "1"
# keepAlive keep alive configuration
keepAlive:
# interval
interval: 5s
# cdn configuration
cdn:
# scheduler enables cdn as a P2P peer;
# if the value is false, the P2P network will not back-to-source through
# cdn but through dfdaemon, and the preheat feature does not work
enable: true
# machinery async job configuration,
# see https://github.com/RichardKnop/machinery
job:
# scheduler enable job service
enable: false
# number of workers in global queue
globalWorkerNum: 1
# number of workers in scheduler queue
schedulerWorkerNum: 1
# number of workers in local queue
localWorkerNum: 5
# redis configuration
redis:
# host
host: "__IP__"
# port
port: 6379
# password
password: ""
# brokerDB
brokerDB: 1
# backendDB
backendDB: 2
# enable prometheus metrics
metrics:
# scheduler enable metrics service
enable: false
# metrics service address
addr: ":8000"
# enable peer host metrics
enablePeerHost: false
# console shows log on console
console: true
# whether to enable debug level logger and enable pprof
verbose: true
# listen port for pprof, only valid when the verbose option is true
# default is -1. If it is 0, pprof will use a random port.
pprof-port: -1
# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
jaeger: ""