diff --git a/deploy/docker-compose/config/.gitignore b/deploy/docker-compose/config/.gitignore
index fcb647e1f..567181b36 100644
--- a/deploy/docker-compose/config/.gitignore
+++ b/deploy/docker-compose/config/.gitignore
@@ -1,3 +1,6 @@
 cdn.json
 cdn.yaml
 dfget.yaml
+manager.yaml
+scheduler.yaml
+
diff --git a/deploy/docker-compose/config/scheduler.yaml b/deploy/docker-compose/config/scheduler.yaml
index dc0c7a184..ea982f2cd 100644
--- a/deploy/docker-compose/config/scheduler.yaml
+++ b/deploy/docker-compose/config/scheduler.yaml
@@ -1,19 +1,130 @@
+# server scheduler instance configuration
+server:
+  # # ip
+  ip: 0.0.0.0
+  # # host
+  # host: localhost
+  # port is the port the scheduler server listens on.
+  port: 8002
+  # limit the number of requests
+  listenLimit: 10000
+  # cacheDir is dynconfig cache storage directory
+  # in linux, default value is /var/cache/dragonfly
+  # in macos(just for testing), default value is /Users/$USER/.dragonfly/cache
+  cacheDir: ""
+  # logDir is the log storage directory
+  # in linux, default value is /var/log/dragonfly
+  # in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
+  logDir: ""
+
+# scheduler policy configuration
+scheduler:
+  # algorithm configuration to use different scheduling algorithms,
+  # default configuration supports "default" and "ml"
+  # "default" is the rule-based scheduling algorithm,
+  # "ml" is the machine learning scheduling algorithm
+  # It also supports user plugin extension, the algorithm value is "plugin",
+  # and the compiled `d7y-scheduler-plugin-evaluator.so` file is added to
+  # the dragonfly working directory plugins
+  algorithm: default
+  # backSourceCount is the number of backsource clients
+  # when the CDN is unavailable
+  backSourceCount: 3
+  # retry scheduling back-to-source limit times
+  retryBackSourceLimit: 5
+  # retry scheduling limit times
+  retryLimit: 20
+  # retry scheduling interval
+  retryInterval: 200ms
+  # gc metadata configuration
+  gc:
+    # peerGCInterval is peer's gc interval
+    peerGCInterval: 10m
+    # peerTTL is peer's TTL duration
+    peerTTL: 24h
+    # taskGCInterval is task's gc interval
+    taskGCInterval: 10m
+    # taskTTL is task's TTL duration
+    taskTTL: 24h
+    # hostGCInterval is host's gc interval
+    hostGCInterval: 30m
+    # hostTTL is host's TTL duration
+    hostTTL: 48h
+
+# dynamic data configuration
+dynConfig:
+  # dynamic config refresh interval
+  refreshInterval: 1m
+
+# scheduler host configuration
+host:
+  # idc is the idc of scheduler instance
+  idc: ""
+  # netTopology is the net topology of scheduler instance
+  netTopology: ""
+  # location is the location of scheduler instance
+  location: ""
+
+# manager configuration
+manager:
+  # addr manager access address
+  addr: "172.18.191.244:65003"
+  # schedulerClusterID cluster id to which scheduler instance belongs
+  schedulerClusterID: "1"
+  # keepAlive keep alive configuration
+  keepAlive:
+    # interval
+    interval: 5s
+
+# cdn configuration
+cdn:
+  # scheduler enable cdn as P2P peer,
+  # if the value is false, P2P network will not be back-to-source through
+  # cdn but by dfdaemon and preheat feature does not work
+  enable: true
+
+# machinery async job configuration,
+# see https://github.com/RichardKnop/machinery
+job:
+  # scheduler enable job service
+  enable: false
+  # number of workers in global queue
+  globalWorkerNum: 1
+  # number of workers in scheduler queue
+  schedulerWorkerNum: 1
+  # number of workers in local queue
+  localWorkerNum: 5
+  # redis configuration
+  redis:
+    # host
+    host: "172.18.191.244"
+    # port
+    port: 6379
+    # password
+    password: ""
+    # brokerDB
+    brokerDB: 1
+    # backendDB
+    backendDB: 2
+
+# enable prometheus metrics
+metrics:
+  # scheduler enable metrics service
+  enable: false
+  # metrics service address
+  addr: ":8000"
+  # enable peer host metrics
+  enablePeerHost: false
+
+# console shows log on console
+console: true
+
+# whether to enable debug level logger and enable pprof
 verbose: true
-server:
-  port: 8002
+# listen port for pprof, only valid when the verbose option is true
+# default is -1. If it is 0, pprof will use a random port.
+pprof-port: -1
-worker:
-  workerNum: 4
-  workerJobPoolSize: 10000
-  senderNum: 10
-  senderJobPoolSize: 10000
-
-manager:
-  enable: false
-
-job:
-  enable: false
-
-dynconfig:
-  cdnDir: /opt/dragonfly/scheduler-cdn
+# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
+jaeger: ""
\ No newline at end of file
diff --git a/deploy/docker-compose/docker-compose.yaml b/deploy/docker-compose/docker-compose.yaml
index 16a1ac923..e08868cd8 100644
--- a/deploy/docker-compose/docker-compose.yaml
+++ b/deploy/docker-compose/docker-compose.yaml
@@ -1,32 +1,95 @@
-version: "3.9"
+version: "3"
 services:
-  dfdaemon:
-    image: dragonflyoss/dfdaemon:v2.0.1
+  redis:
+    image: redis:6-alpine
+    container_name: redis
+    network_mode: host
+    command: >
+      --requirepass dragonfly
+    healthcheck:
+      test: ["CMD", "redis-cli","-a","dragonfly","ping"]
+      interval: 1s
+      timeout: 3s
+      retries: 30
+
+  mysql:
+    image: mariadb:10.6
+    container_name: mysql
+    network_mode: host
+    environment:
+      - MARIADB_USER=dragonfly
+      - MARIADB_PASSWORD=dragonfly
+      - MARIADB_DATABASE=manager
+      - MARIADB_ALLOW_EMPTY_ROOT_PASSWORD=yes
+    healthcheck:
+      test: ["CMD-SHELL", "mysqladmin status"]
+      interval: 1s
+      timeout: 3s
+      retries: 30
+
+  manager:
+    image: dragonflyoss/manager:v2.0.2-rc.9
+    container_name: manager
+    network_mode: host
     depends_on:
-    - scheduler
-    - cdn
+      - redis
+      - mysql
+    restart: always
+    volumes:
+      - /tmp/log/dragonfly:/var/log/dragonfly
+      - ./config/manager.yaml:/etc/dragonfly/manager.yaml
+    healthcheck:
+      test: ["CMD-SHELL", "nc -z 127.0.0.1 8080 || exit 1"]
+      interval: 1s
+      timeout: 3s
+      retries: 30
+
+  dfdaemon:
+    image: dragonflyoss/dfdaemon:v2.0.2-rc.9
+    depends_on:
+      - scheduler
+      - cdn
     container_name: dfdaemon
     network_mode: host
+    restart: always
+    healthcheck:
+      test: ["CMD-SHELL", "nc -z 127.0.0.1 65001 || exit 1"]
+      interval: 1s
+      timeout: 3s
+      retries: 30
     volumes:
-    - /tmp/log/dragonfly:/var/log/dragonfly
-    - ./config/dfget.yaml:/etc/dragonfly/dfget.yaml
+      - /tmp/log/dragonfly:/var/log/dragonfly
+      - ./config/dfget.yaml:/etc/dragonfly/dfget.yaml
 
   scheduler:
-    image: dragonflyoss/scheduler:v2.0.1
+    image: dragonflyoss/scheduler:v2.0.2-rc.9
     depends_on:
-    - cdn
+      - cdn
+      - manager
     container_name: scheduler
     network_mode: host
+    restart: always
+    healthcheck:
+      test: ["CMD-SHELL", "nc -z 127.0.0.1 8002 || exit 1"]
+      interval: 1s
+      timeout: 3s
+      retries: 30
     volumes:
-    - /tmp/log/dragonfly:/var/log/dragonfly
-    - ./config/scheduler.yaml:/etc/dragonfly/scheduler.yaml
-    - ./config/cdn.json:/opt/dragonfly/scheduler-cdn/cdn.json
+      - /tmp/log/dragonfly:/var/log/dragonfly
+      - ./config/scheduler.yaml:/etc/dragonfly/scheduler.yaml
+      - ./config/cdn.json:/opt/dragonfly/scheduler-cdn/cdn.json
 
   cdn:
-    image: dragonflyoss/cdn:v2.0.1
+    image: dragonflyoss/cdn:v2.0.2-rc.9
     container_name: cdn
     network_mode: host
+    restart: always
+    healthcheck:
+      test: ["CMD-SHELL", "nc -z 127.0.0.1 8001 || exit 1"]
+      interval: 1s
+      timeout: 3s
+      retries: 30
     volumes:
-    - /tmp/log/dragonfly:/var/log/dragonfly
-    - ./config/cdn.yaml:/etc/dragonfly/cdn.yaml
-    - ./config/nginx.conf:/etc/nginx/nginx.conf
+      - /tmp/log/dragonfly:/var/log/dragonfly
+      - ./config/cdn.yaml:/etc/dragonfly/cdn.yaml
+      - ./config/nginx.conf:/etc/nginx/nginx.conf
diff --git a/deploy/docker-compose/run.sh b/deploy/docker-compose/run.sh
index c371acce1..e0f5eee51 100755
--- a/deploy/docker-compose/run.sh
+++ b/deploy/docker-compose/run.sh
@@ -14,6 +14,8 @@ prepare(){
     cat template/cdn.template.json > config/cdn.json
     cat template/cdn.template.yaml > config/cdn.yaml
     cat template/dfget.template.yaml > config/dfget.yaml
+    cat template/scheduler.template.yaml > config/scheduler.yaml
+    cat template/manager.template.yaml > config/manager.yaml
 
     ip=${IP:-$(hostname -i)}
     hostname=$(hostname)
@@ -21,6 +23,8 @@ prepare(){
     sed -i "s,__IP__,$ip," config/cdn.json
     sed -i "s,__IP__,$ip," config/dfget.yaml
     sed -i "s,__IP__,$ip," config/cdn.yaml
+    sed -i "s,__IP__,$ip," config/scheduler.yaml
+    sed -i "s,__IP__,$ip," config/manager.yaml
     sed -i "s,__HOSTNAME__,$hostname," config/cdn.json
 }
@@ -62,7 +66,24 @@ case "$1" in
   *)
     if [ -z "$1" ]; then
+
+      # start all services defined in docker-compose
       docker-compose up -d
+
+      # docker-compose version 3 depends_on does not wait for redis and mysql to be "ready" before starting manager ...
+      # doc https://docs.docker.com/compose/compose-file/compose-file-v3/#depends_on
+      for i in $(seq 0 10); do
+        service_num=$(docker-compose ps --services |wc -l)
+        ready_num=$(docker-compose ps | grep healthy | wc -l)
+        if [ "$service_num" -eq "$ready_num" ]; then
+          break
+        fi
+        echo "wait for all service ready: $ready_num/$service_num,$i times check"
+        sleep 2
+      done
+
+      # print service list info
+      docker-compose ps
       exit 0
     fi
     echo "unknown argument: $1"
diff --git a/deploy/docker-compose/template/cdn.template.yaml b/deploy/docker-compose/template/cdn.template.yaml
index 67d459328..53d1c13c6 100644
--- a/deploy/docker-compose/template/cdn.template.yaml
+++ b/deploy/docker-compose/template/cdn.template.yaml
@@ -5,7 +5,6 @@ base:
   # listenPort is the port cdn server listens on.
   # default: 8003
   listenPort: 8003
-  advertiseIP: __IP__
 
   # DownloadPort is the port for download files from cdn.
   # And you should start a file server firstly which listens on the download port.
@@ -17,12 +16,12 @@ base:
   systemReservedBandwidth: 20M
 
   # MaxBandwidth is the network bandwidth that cdn can use.
-  # default: 200 MB, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
-  maxBandwidth: 1024M
+  # default: 1G, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
+  maxBandwidth: 1G
 
-  # Whether to enable profiler
-  # default: false
-  enableProfiler: false
+  # AdvertiseIP is used to set the ip that we advertise to other peers in the p2p-network.
+  # By default, the first non-loopback address is advertised.
+  advertiseIP: __IP__
 
   # FailAccessInterval is the interval time after failed to access the URL.
   # If a task failed to be downloaded from the source, it will not be retried in the time since the last failure.
@@ -37,37 +36,90 @@ base:
   # default: 2m0s
   gcMetaInterval: 2m
 
-  # gcStorageInterval is the interval time to execute GC storage.
-  # default: 15s
-  gcStorageInterval: 15s
-
   # TaskExpireTime when a task is not accessed within the taskExpireTime,
   # and it will be treated to be expired.
   # default: 3m0s
   taskExpireTime: 3m
 
-  # StoragePattern is the pattern of storage policy, [disk/hybrid]
-  storagePattern: disk
+  # storageMode is the Mode of storage policy, [disk/hybrid]
+  storageMode: disk
+
+
+  # logDir is the log storage directory
+  # in linux, default value is /var/log/dragonfly
+  # in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
+  logDir: ""
+
+  # manager configuration
+  manager:
+    addr: "__IP__:65003"
+    cdnClusterID: "1"
+    keepAlive:
+      interval: 5s
+
+  # host configuration
+  host:
+    location:
+    idc:
+
+  # enable prometheus metrics
+  # metrics:
+  #   # metrics service address
+  #   addr: ":8000"
 
-  # Console shows log on console
-  console: false
 plugins:
   storageDriver:
-  - name: disk
-    enable: true
-    config:
-      baseDir: /tmp/cdn
-  storageManager:
-  - name: disk
-    enable: true
-    config:
-      gcInitialDelay: 5s
-      gcInterval: 15s
-      driverConfigs:
-        disk:
-          gcConfig:
-            youngGCThreshold: 100G
-            fullGCThreshold: 5G
-            cleanRatio: 1
-            intervalThreshold: 2h
+    - name: disk
+      enable: true
+      config:
+        baseDir: /tmp/cdn
+
+  storagemanager:
+    - name: disk
+      enable: true
+      config:
+        gcInitialDelay: 0s
+        gcInterval: 15s
+        driverConfigs:
+          disk:
+            gcConfig:
+              youngGCThreshold: 100.0GB
+              fullGCThreshold: 5.0GB
+              cleanRatio: 1
+              intervalThreshold: 2h0m0s
+    - name: hybrid
+      enable: false
+      config:
+        gcInitialDelay: 0s
+        gcInterval: 15s
+        driverConfigs:
+          disk:
+            gcConfig:
+              youngGCThreshold: 100.0GB
+              fullGCThreshold: 5.0GB
+              cleanRatio: 1
+              intervalThreshold: 2h0m0s
+          memory:
+            gcConfig:
+              youngGCThreshold: 100.0GB
+              fullGCThreshold: 5.0GB
+              cleanRatio: 3
+              intervalThreshold: 2h0m0s
+
+# console shows log on console
+console: false
+
+# whether to enable debug level logger and enable pprof
+verbose: false
+
+# listen port for pprof, only valid when the verbose option is true
+# default is -1. If it is 0, pprof will use a random port.
+pprof-port: -1
+
+# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
+jaeger: ""
+
+# service name used in tracer
+# default: dragonfly-cdn
+service-name: dragonfly-cdn
diff --git a/deploy/docker-compose/template/dfget.template.yaml b/deploy/docker-compose/template/dfget.template.yaml
index 2d1c763ac..2a8a6da12 100644
--- a/deploy/docker-compose/template/dfget.template.yaml
+++ b/deploy/docker-compose/template/dfget.template.yaml
@@ -1,28 +1,70 @@
 # daemon alive time, when sets 0s, daemon will not auto exit
+# it is useful for long-running daemons
 aliveTime: 0s
 # daemon gc task running interval
 gcInterval: 1m0s
+# daemon work directory, daemon will change current working directory to this
+# in linux, default value is /usr/local/dragonfly
+# in macos(just for testing), default value is /Users/$USER/.dragonfly
+workHome: ""
+
+# cacheDir is dynconfig cache storage directory
+# in linux, default value is /var/cache/dragonfly
+# in macos(just for testing), default value is /Users/$USER/.dragonfly/cache
+cacheDir: ""
+
+# logDir is the log storage directory
+# in linux, default value is /var/log/dragonfly
+# in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
+logDir: ""
+
+# dataDir is the download data storage directory
+# in linux, default value is /var/lib/dragonfly
+# in macos(just for testing), default value is /Users/$USER/.dragonfly/data
+dataDir: ""
+
 # when daemon exit, keep peer task data or not
 # it is usefully when upgrade daemon service, all local cache will be saved
+# default is false
 keepStorage: true
+# console shows log on console
+console: false
+
+# whether to enable debug level logger and enable pprof
+verbose: false
+
+# listen port for pprof, only valid when the verbose option is true
+# default is -1. If it is 0, pprof will use a random port.
+pprof-port: -1
+
+# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
+jaeger: ""
+
 # all addresses of all schedulers
 # the schedulers of all daemons should be same in one region or zone.
 # daemon will send tasks to a fixed scheduler by hashing the task url and meta data
 # caution: only tcp is supported
 scheduler:
+  manager:
+    # get scheduler list dynamically from manager
+    enable: false
+    # manager service addresses
+    netAddrs:
+      - type: tcp
+        addr: __IP__:65003
+    # scheduler list refresh interval
+    refreshInterval: 5m
+  # schedule timeout
+  scheduleTimeout: 30s
+  # when true, only scheduler says back source, daemon can back source
+  disableAutoBackSource: false
   # below example is a stand address
   netAddrs:
-  - type: tcp
-    addr: __IP__:8002
-  # schedule timeout
-  scheduleTimeout: 10s
-
-# when enable, pprof will be enabled,
-verbose: true
-console: false
+    - type: tcp
+      addr: __IP__:8002
 
 # current host info used for scheduler
 host:
@@ -32,17 +74,43 @@ host:
   # access ip for other peers
   # when local ip is different with access ip, advertiseIP should be set
   advertiseIP: __IP__
-  # geographical location and network topology
+  # geographical location, separated by "|" characters
   location: ""
+  # idc deployed by daemon
   idc: ""
+  # security domain deployed by daemon, network isolation between different security domains
   securityDomain: ""
+  # network topology, separated by "|" characters
   netTopology: ""
+  # daemon hostname
+  # hostname: ""
 
 # download service option
 download:
-  # download limit per second
-  totalRateLimit: 1024Mi
-  perPeerRateLimit: 1024Mi
+  # calculate digest when transfer files, set false to save memory
+  calculateDigest: true
+  # total download limit per second
+  totalRateLimit: 200Mi
+  # per peer task download limit per second
+  perPeerRateLimit: 100Mi
+  # download piece timeout
+  pieceDownloadTimeout: 30s
+  # golang transport option
+  transportOption:
+    # dial timeout
+    dialTimeout: 2s
+    # keep alive
+    keepAlive: 30s
+    # same with http.Transport.MaxIdleConns
+    maxIdleConns: 100
+    # same with http.Transport.IdleConnTimeout
+    idleConnTimeout: 90s
+    # same with http.Transport.ResponseHeaderTimeout
+    responseHeaderTimeout: 2s
+    # same with http.Transport.TLSHandshakeTimeout
+    tlsHandshakeTimeout: 1s
+    # same with http.Transport.ExpectContinueTimeout
+    expectContinueTimeout: 2s
   # download grpc option
   downloadGRPC:
     # security option
@@ -55,6 +123,8 @@ download:
     # download service listen address
     # current, only support unix domain socket
     unixListen:
+      # in linux, default value is /var/run/dfdaemon.sock
+      # in macos(just for testing), default value is /tmp/dfdaemon.sock
       socket: /var/run/dfdaemon.sock
   # peer grpc option
   # peer grpc service send pieces info to other peers
@@ -75,14 +145,10 @@ download:
     # start: 65000
     # end: 65009
 
-# proxy config file location or detail config
-# proxy: ""
-
 # upload service option
 upload:
-  defaultFilter: "Expires&Signature"
   # upload limit per second
-  rateLimit: 200Mi
+  rateLimit: 100Mi
   security:
     insecure: true
     cacert: ""
     cert: ""
     key: ""
@@ -103,19 +169,42 @@ upload:
 storage:
   # task data expire time
   # when there is no access to a task data, this task will be gc.
-  taskExpireTime: 1m0s
+  taskExpireTime: 6h
+  # storage strategy when processing task data
+  # io.d7y.storage.v2.simple : download file to data directory first, then copy to output path, this is the default action
+  #   the downloaded file in data directory will be the peer data for uploading to other peers
+  # io.d7y.storage.v2.advance: download file directly to output path with postfix, hard link to final output,
+  #   avoids the copy to output path, faster than the simple strategy, but:
+  #   the output file with postfix will be the peer data for uploading to other peers
+  #   when the user deletes or changes this file, this peer data will be corrupted
+  # default is io.d7y.storage.v2.advance
+  strategy: io.d7y.storage.v2.advance
+  # disk quota gc threshold, when the quota of all tasks exceeds the gc threshold, the oldest tasks will be reclaimed.
+  diskGCThreshold: 50Gi
+  # disk used percent gc threshold, when the disk used percent exceeds, the oldest tasks will be reclaimed.
+  # eg, diskGCThresholdPercent=80, when the disk usage is above 80%, start to gc the oldest tasks
+  diskGCThresholdPercent: 80
+  # set to true for reusing underlying storage for the same task id
   multiplex: true
 
-# proxy service option
+# proxy service config file location or detail config
+# proxy: ""
+
+# proxy service detail option
 proxy:
+  # filter for hash url
+  # when defaultFilter: "Expires&Signature", for example:
+  #   http://localhost/xyz?Expires=111&Signature=222 and http://localhost/xyz?Expires=333&Signature=999
+  #   are the same task
+  defaultFilter: "Expires&Signature"
   security:
     insecure: true
     cacert: ""
     cert: ""
     key: ""
   tcpListen:
-    # Namespace stands the linux net namespace, like /proc/1/ns/net
-    # It's useful for running daemon in pod with ip allocated and listen in host
+    # namespace stands for the linux net namespace, like /proc/1/ns/net
+    # it's useful for running the daemon in a pod with its own ip while listening on the given port in the host net namespace
     # Linux only
     namespace: ""
     # listen address
@@ -128,16 +217,51 @@ proxy:
     # start: 65020
     # end: 65029
   registryMirror:
+    # when enabled, use the header "X-Dragonfly-Registry" for the remote instead of url
+    dynamic: true
     # url for the registry mirror
     url: https://index.docker.io
     # whether to ignore https certificate errors
     insecure: true
     # optional certificates if the remote server uses self-signed certificates
-    certs: [ ]
+    certs: []
     # whether to request the remote registry directly
     direct: false
+    # whether to use proxies to decide if dragonfly should be used
+    useProxies: false
   proxies:
-  # proxy all http image layer download requests with dfget
-  - regx: (blobs|manifests|config)/sha256.*
-  - regx: __IP__.* # test only
+    # proxy all http image layer download requests with dfget
+    - regx: blobs/sha256.*
+    # change http requests to some-registry to https and proxy them with dfget
+    - regx: some-registry/
+      useHTTPS: true
+    # proxy requests directly, without dfget
+    - regx: no-proxy-reg
+      direct: true
+    # proxy requests with redirect
+    - regx: some-registry
+      redirect: another-registry
+    # the same as url rewrite, like the apache ProxyPass directive
+    - regx: ^http://some-registry/(.*)
+      redirect: http://another-registry/$1
+
+  hijackHTTPS:
+    # key pair used to hijack https requests
+    cert: ""
+    key: ""
+    hosts:
+      - regx: mirror.aliyuncs.com:443 # regexp to match request hosts
+        # whether to ignore https certificate errors
+        insecure: true
+        # optional certificates if the host uses self-signed certificates
+        certs: []
+  # max tasks to download at the same time, 0 means no limit
+  maxConcurrency: 0
+  whiteList:
+    # the host of the whitelist
+    - host: ""
+      # match whitelist hosts
+      regx:
+      # ports that need to be added to the whitelist
+      ports:
\ No newline at end of file
diff --git a/deploy/docker-compose/template/manager.template.yaml b/deploy/docker-compose/template/manager.template.yaml
new file mode 100644
index 000000000..5bbf6b921
--- /dev/null
+++ b/deploy/docker-compose/template/manager.template.yaml
@@ -0,0 +1,73 @@
+# current server info used for server
+server:
+  # grpc server configuration
+  grpc:
+    # listen address
+    listen: __IP__
+    # listen port range, manager will try to listen on the start port
+    # when that port is not available, manager will try the next port
+    port:
+      start: 65003
+      end: 65003
+  # rest server configuration
+  rest:
+    # listen address
+    addr: :8080
+    # front-end console resource path
+    # publicPath: /dist
+
+# database info used for server
+database:
+  # mysql configuration
+  mysql:
+    user: dragonfly
+    password: dragonfly
+    host: __IP__
+    port: 3306
+    dbname: manager
+    migrate: true
+    # tls:
+    #   # client certificate file path
+    #   cert: /etc/ssl/certs/cert.pem
+    #   # client key file path
+    #   key: /etc/ssl/private/key.pem
+    #   # ca file path
+    #   ca: /etc/ssl/certs/ca.pem
+    #   # whether a client verifies the server's certificate chain and host name.
+    #   insecureSkipVerify: true
+  # redis configuration
+  redis:
+    password: dragonfly
+    host: __IP__
+    port: 6379
+    db: 0
+# manager server cache
+# cache:
+#   # redis cache configuration
+#   redis:
+#     # cache ttl configuration
+#     ttl: 30s
+#   # local cache configuration
+#   local:
+#     # lfu cache size
+#     size: 10000
+#     # cache ttl configuration
+#     ttl: 30s
+
+# enable prometheus metrics
+# metrics:
+#   # metrics service address
+#   addr: ":8000"
+
+# console shows log on console
+console: false
+
+# whether to enable debug level logger and enable pprof
+verbose: false
+
+# listen port for pprof, only valid when the verbose option is true
+# default is -1. If it is 0, pprof will use a random port.
+pprof-port: -1
+
+# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
+jaeger: ""
\ No newline at end of file
diff --git a/deploy/docker-compose/template/scheduler.template.yaml b/deploy/docker-compose/template/scheduler.template.yaml
new file mode 100644
index 000000000..c91fed701
--- /dev/null
+++ b/deploy/docker-compose/template/scheduler.template.yaml
@@ -0,0 +1,130 @@
+# server scheduler instance configuration
+server:
+  # # ip
+  ip: 0.0.0.0
+  # # host
+  # host: localhost
+  # port is the port the scheduler server listens on.
+  port: 8002
+  # limit the number of requests
+  listenLimit: 10000
+  # cacheDir is dynconfig cache storage directory
+  # in linux, default value is /var/cache/dragonfly
+  # in macos(just for testing), default value is /Users/$USER/.dragonfly/cache
+  cacheDir: ""
+  # logDir is the log storage directory
+  # in linux, default value is /var/log/dragonfly
+  # in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
+  logDir: ""
+
+# scheduler policy configuration
+scheduler:
+  # algorithm configuration to use different scheduling algorithms,
+  # default configuration supports "default" and "ml"
+  # "default" is the rule-based scheduling algorithm,
+  # "ml" is the machine learning scheduling algorithm
+  # It also supports user plugin extension, the algorithm value is "plugin",
+  # and the compiled `d7y-scheduler-plugin-evaluator.so` file is added to
+  # the dragonfly working directory plugins
+  algorithm: default
+  # backSourceCount is the number of backsource clients
+  # when the CDN is unavailable
+  backSourceCount: 3
+  # retry scheduling back-to-source limit times
+  retryBackSourceLimit: 5
+  # retry scheduling limit times
+  retryLimit: 20
+  # retry scheduling interval
+  retryInterval: 200ms
+  # gc metadata configuration
+  gc:
+    # peerGCInterval is peer's gc interval
+    peerGCInterval: 10m
+    # peerTTL is peer's TTL duration
+    peerTTL: 24h
+    # taskGCInterval is task's gc interval
+    taskGCInterval: 10m
+    # taskTTL is task's TTL duration
+    taskTTL: 24h
+    # hostGCInterval is host's gc interval
+    hostGCInterval: 30m
+    # hostTTL is host's TTL duration
+    hostTTL: 48h
+
+# dynamic data configuration
+dynConfig:
+  # dynamic config refresh interval
+  refreshInterval: 1m
+
+# scheduler host configuration
+host:
+  # idc is the idc of scheduler instance
+  idc: ""
+  # netTopology is the net topology of scheduler instance
+  netTopology: ""
+  # location is the location of scheduler instance
+  location: ""
+
+# manager configuration
+manager:
+  # addr manager access address
+  addr: "__IP__:65003"
+  # schedulerClusterID cluster id to which scheduler instance belongs
+  schedulerClusterID: "1"
+  # keepAlive keep alive configuration
+  keepAlive:
+    # interval
+    interval: 5s
+
+# cdn configuration
+cdn:
+  # scheduler enable cdn as P2P peer,
+  # if the value is false, P2P network will not be back-to-source through
+  # cdn but by dfdaemon and preheat feature does not work
+  enable: true
+
+# machinery async job configuration,
+# see https://github.com/RichardKnop/machinery
+job:
+  # scheduler enable job service
+  enable: false
+  # number of workers in global queue
+  globalWorkerNum: 1
+  # number of workers in scheduler queue
+  schedulerWorkerNum: 1
+  # number of workers in local queue
+  localWorkerNum: 5
+  # redis configuration
+  redis:
+    # host
+    host: "__IP__"
+    # port
+    port: 6379
+    # password
+    password: ""
+    # brokerDB
+    brokerDB: 1
+    # backendDB
+    backendDB: 2
+
+# enable prometheus metrics
+metrics:
+  # scheduler enable metrics service
+  enable: false
+  # metrics service address
+  addr: ":8000"
+  # enable peer host metrics
+  enablePeerHost: false
+
+# console shows log on console
+console: true
+
+# whether to enable debug level logger and enable pprof
+verbose: true
+
+# listen port for pprof, only valid when the verbose option is true
+# default is -1. If it is 0, pprof will use a random port.
+pprof-port: -1
+
+# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
+jaeger: ""
\ No newline at end of file
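
A minimal verification sketch, assuming the stack is brought up from deploy/docker-compose with the updated run.sh. The ports come from the healthchecks and templates above (manager console 8080, manager grpc 65003, scheduler 8002, cdn 8001, dfdaemon 65001); the nc/curl spot-checks are illustrative only and are not part of the patch.

    # generate the configs from the templates, start the stack and wait for the healthchecks
    ./run.sh

    # every service should eventually report "healthy"
    docker-compose ps

    # spot-check the listening ports used by the healthchecks above
    nc -z 127.0.0.1 65003 && echo "manager grpc ok"
    nc -z 127.0.0.1 8002 && echo "scheduler ok"
    nc -z 127.0.0.1 8001 && echo "cdn ok"
    nc -z 127.0.0.1 65001 && echo "dfdaemon ok"

    # manager console listens on :8080 (rest addr in manager.template.yaml)
    curl -s -o /dev/null -w "%{http_code}\n" http://127.0.0.1:8080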