feat: add logLevel and remove verbose (#4157)

Signed-off-by: Gaius <gaius.qi@gmail.com>
Gaius 2025-06-24 15:27:28 +08:00 committed by GitHub
parent 530ce0e596
commit 4e65b41202
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
49 changed files with 69 additions and 111 deletions
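
For operators, the change is mechanical: wherever a configuration previously set `verbose: true`, the equivalent after this commit is `logLevel: debug`; leaving `logLevel` unset keeps the `info` default. A minimal sketch of that mapping (helper name is illustrative, not part of this commit):

```go
package main

import "fmt"

// legacyToLogLevel maps the removed boolean verbose option onto the new
// string logLevel option. The commit drops the verbose field outright;
// this helper only illustrates the equivalence for config migration.
func legacyToLogLevel(verbose bool) string {
	if verbose {
		return "debug"
	}
	return "info"
}

func main() {
	fmt.Println(legacyToLogLevel(true))  // debug
	fmt.Println(legacyToLogLevel(false)) // info
}
```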

View File

@ -16,10 +16,8 @@ dfcache delete <\-i cid> [flags]
\-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG \-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
\-\-console whether logger output records to the stdout \-\-console whether logger output records to the stdout
\-\-logdir string Dfcache log directory \-\-logdir string Dfcache log directory
\-\-pprof\-port int listen port for pprof(default \-1)
\-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network \-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network
\-\-timeout duration Timeout for this cache operation, 0 is infinite \-\-timeout duration Timeout for this cache operation, 0 is infinite
\-\-verbose whether logger use debug level
\-\-workhome string Dfcache working directory \-\-workhome string Dfcache working directory
\-h, \-\-help help for delete \-h, \-\-help help for delete
.EE .EE

View File

@ -16,10 +16,8 @@ dfcache doc [flags]
\-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG \-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
\-\-console whether logger output records to the stdout \-\-console whether logger output records to the stdout
\-\-logdir string Dfcache log directory \-\-logdir string Dfcache log directory
\-\-pprof\-port int listen port for pprof(default \-1)
\-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network \-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network
\-\-timeout duration Timeout for this cache operation, 0 is infinite \-\-timeout duration Timeout for this cache operation, 0 is infinite
\-\-verbose whether logger use debug level
\-\-workhome string Dfcache working directory \-\-workhome string Dfcache working directory
\-h, \-\-help help for doc \-h, \-\-help help for doc
\-\-path string destination dir of generated markdown documents (default \(dq./\(dq) \-\-path string destination dir of generated markdown documents (default \(dq./\(dq)

View File

@ -16,10 +16,8 @@ dfcache export <\-i cid> <output>|<\-O output> [flags]
\-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG \-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
\-\-console whether logger output records to the stdout \-\-console whether logger output records to the stdout
\-\-logdir string Dfcache log directory \-\-logdir string Dfcache log directory
\-\-pprof\-port int listen port for pprof(default \-1)
\-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network \-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network
\-\-timeout duration Timeout for this cache operation, 0 is infinite \-\-timeout duration Timeout for this cache operation, 0 is infinite
\-\-verbose whether logger use debug level
\-\-workhome string Dfcache working directory \-\-workhome string Dfcache working directory
\-h, \-\-help help for export \-h, \-\-help help for export
\-l, \-\-local only export file from local cache \-l, \-\-local only export file from local cache

View File

@ -16,10 +16,8 @@ dfcache import <\-i cid> <file>|<\-I file> [flags]
\-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG \-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
\-\-console whether logger output records to the stdout \-\-console whether logger output records to the stdout
\-\-logdir string Dfcache log directory \-\-logdir string Dfcache log directory
\-\-pprof\-port int listen port for pprof(default \-1)
\-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network \-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network
\-\-timeout duration Timeout for this cache operation, 0 is infinite \-\-timeout duration Timeout for this cache operation, 0 is infinite
\-\-verbose whether logger use debug level
\-\-workhome string Dfcache working directory \-\-workhome string Dfcache working directory
\-h, \-\-help help for import \-h, \-\-help help for import
\-I, \-\-input string import the given file into P2P network \-I, \-\-input string import the given file into P2P network

View File

@ -16,10 +16,8 @@ dfcache plugin [flags]
\-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG \-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
\-\-console whether logger output records to the stdout \-\-console whether logger output records to the stdout
\-\-logdir string Dfcache log directory \-\-logdir string Dfcache log directory
\-\-pprof\-port int listen port for pprof(default \-1)
\-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network \-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network
\-\-timeout duration Timeout for this cache operation, 0 is infinite \-\-timeout duration Timeout for this cache operation, 0 is infinite
\-\-verbose whether logger use debug level
\-\-workhome string Dfcache working directory \-\-workhome string Dfcache working directory
\-h, \-\-help help for plugin \-h, \-\-help help for plugin
.EE .EE

View File

@ -17,10 +17,8 @@ dfcache stat <\-i cid> [flags]
\-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG \-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
\-\-console whether logger output records to the stdout \-\-console whether logger output records to the stdout
\-\-logdir string Dfcache log directory \-\-logdir string Dfcache log directory
\-\-pprof\-port int listen port for pprof(default \-1)
\-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network \-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network
\-\-timeout duration Timeout for this cache operation, 0 is infinite \-\-timeout duration Timeout for this cache operation, 0 is infinite
\-\-verbose whether logger use debug level
\-\-workhome string Dfcache working directory \-\-workhome string Dfcache working directory
\-h, \-\-help help for stat \-h, \-\-help help for stat
\-l, \-\-local only check task exists locally, and don\(aqt check other peers in P2P network \-l, \-\-local only check task exists locally, and don\(aqt check other peers in P2P network

View File

@ -16,10 +16,8 @@ dfcache version [flags]
\-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG \-\-config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
\-\-console whether logger output records to the stdout \-\-console whether logger output records to the stdout
\-\-logdir string Dfcache log directory \-\-logdir string Dfcache log directory
\-\-pprof\-port int listen port for pprof(default \-1)
\-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network \-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network
\-\-timeout duration Timeout for this cache operation, 0 is infinite \-\-timeout duration Timeout for this cache operation, 0 is infinite
\-\-verbose whether logger use debug level
\-\-workhome string Dfcache working directory \-\-workhome string Dfcache working directory
\-h, \-\-help help for version \-h, \-\-help help for version
.EE .EE

View File

@ -23,10 +23,8 @@ network.
\-\-console whether logger output records to the stdout \-\-console whether logger output records to the stdout
\-h, \-\-help help for dfcache \-h, \-\-help help for dfcache
\-\-logdir string Dfcache log directory \-\-logdir string Dfcache log directory
\-\-pprof\-port int listen port for pprof(default \-1)
\-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network \-t, \-\-tag string different tags for the same cid will be recognized as different files in P2P network
\-\-timeout duration Timeout for this cache operation, 0 is infinite \-\-timeout duration Timeout for this cache operation, 0 is infinite
\-\-verbose whether logger use debug level
\-\-workhome string Dfcache working directory \-\-workhome string Dfcache working directory
.EE .EE
.SH SEE ALSO .SH SEE ALSO

View File

@ -23,10 +23,8 @@ responsibility to go back to source and add file into P2P network.
--console whether logger output records to the stdout --console whether logger output records to the stdout
-h, --help help for dfcache -h, --help help for dfcache
--logdir string Dfcache log directory --logdir string Dfcache log directory
--pprof-port int listen port for pprof(default -1)
-t, --tag string different tags for the same cid will be recognized as different files in P2P network -t, --tag string different tags for the same cid will be recognized as different files in P2P network
--timeout duration Timeout for this cache operation, 0 is infinite --timeout duration Timeout for this cache operation, 0 is infinite
--verbose whether logger use debug level
--workhome string Dfcache working directory --workhome string Dfcache working directory
``` ```

View File

@ -19,10 +19,8 @@ dfcache delete <-i cid> [flags]
--config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG --config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
--console whether logger output records to the stdout --console whether logger output records to the stdout
--logdir string Dfcache log directory --logdir string Dfcache log directory
--pprof-port int listen port for pprof(default -1)
-t, --tag string different tags for the same cid will be recognized as different files in P2P network -t, --tag string different tags for the same cid will be recognized as different files in P2P network
--timeout duration Timeout for this cache operation, 0 is infinite --timeout duration Timeout for this cache operation, 0 is infinite
--verbose whether logger use debug level
--workhome string Dfcache working directory --workhome string Dfcache working directory
-h, --help help for delete -h, --help help for delete
``` ```

View File

@ -19,10 +19,8 @@ dfcache doc [flags]
--config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG --config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
--console whether logger output records to the stdout --console whether logger output records to the stdout
--logdir string Dfcache log directory --logdir string Dfcache log directory
--pprof-port int listen port for pprof(default -1)
-t, --tag string different tags for the same cid will be recognized as different files in P2P network -t, --tag string different tags for the same cid will be recognized as different files in P2P network
--timeout duration Timeout for this cache operation, 0 is infinite --timeout duration Timeout for this cache operation, 0 is infinite
--verbose whether logger use debug level
--workhome string Dfcache working directory --workhome string Dfcache working directory
-h, --help help for doc -h, --help help for doc
--path string destination dir of generated markdown documents (default "./") --path string destination dir of generated markdown documents (default "./")

View File

@ -19,10 +19,8 @@ dfcache export <-i cid> <output>|<-O output> [flags]
--config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG --config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
--console whether logger output records to the stdout --console whether logger output records to the stdout
--logdir string Dfcache log directory --logdir string Dfcache log directory
--pprof-port int listen port for pprof(default -1)
-t, --tag string different tags for the same cid will be recognized as different files in P2P network -t, --tag string different tags for the same cid will be recognized as different files in P2P network
--timeout duration Timeout for this cache operation, 0 is infinite --timeout duration Timeout for this cache operation, 0 is infinite
--verbose whether logger use debug level
--workhome string Dfcache working directory --workhome string Dfcache working directory
-h, --help help for export -h, --help help for export
-l, --local only export file from local cache -l, --local only export file from local cache

View File

@ -19,10 +19,8 @@ dfcache import <-i cid> <file>|<-I file> [flags]
--config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG --config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
--console whether logger output records to the stdout --console whether logger output records to the stdout
--logdir string Dfcache log directory --logdir string Dfcache log directory
--pprof-port int listen port for pprof(default -1)
-t, --tag string different tags for the same cid will be recognized as different files in P2P network -t, --tag string different tags for the same cid will be recognized as different files in P2P network
--timeout duration Timeout for this cache operation, 0 is infinite --timeout duration Timeout for this cache operation, 0 is infinite
--verbose whether logger use debug level
--workhome string Dfcache working directory --workhome string Dfcache working directory
-h, --help help for import -h, --help help for import
-I, --input string import the given file into P2P network -I, --input string import the given file into P2P network

View File

@ -19,10 +19,8 @@ dfcache plugin [flags]
--config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG --config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
--console whether logger output records to the stdout --console whether logger output records to the stdout
--logdir string Dfcache log directory --logdir string Dfcache log directory
--pprof-port int listen port for pprof(default -1)
-t, --tag string different tags for the same cid will be recognized as different files in P2P network -t, --tag string different tags for the same cid will be recognized as different files in P2P network
--timeout duration Timeout for this cache operation, 0 is infinite --timeout duration Timeout for this cache operation, 0 is infinite
--verbose whether logger use debug level
--workhome string Dfcache working directory --workhome string Dfcache working directory
-h, --help help for plugin -h, --help help for plugin
``` ```

View File

@ -19,10 +19,8 @@ dfcache stat <-i cid> [flags]
--config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG --config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
--console whether logger output records to the stdout --console whether logger output records to the stdout
--logdir string Dfcache log directory --logdir string Dfcache log directory
--pprof-port int listen port for pprof(default -1)
-t, --tag string different tags for the same cid will be recognized as different files in P2P network -t, --tag string different tags for the same cid will be recognized as different files in P2P network
--timeout duration Timeout for this cache operation, 0 is infinite --timeout duration Timeout for this cache operation, 0 is infinite
--verbose whether logger use debug level
--workhome string Dfcache working directory --workhome string Dfcache working directory
-h, --help help for stat -h, --help help for stat
-l, --local only check task exists locally, and don't check other peers in P2P network -l, --local only check task exists locally, and don't check other peers in P2P network

View File

@ -19,10 +19,8 @@ dfcache version [flags]
--config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG --config string the path of configuration file with yaml extension name, default is /etc/dragonfly/dfcache.yaml, it can also be set by env var: DFCACHE_CONFIG
--console whether logger output records to the stdout --console whether logger output records to the stdout
--logdir string Dfcache log directory --logdir string Dfcache log directory
--pprof-port int listen port for pprof(default -1)
-t, --tag string different tags for the same cid will be recognized as different files in P2P network -t, --tag string different tags for the same cid will be recognized as different files in P2P network
--timeout duration Timeout for this cache operation, 0 is infinite --timeout duration Timeout for this cache operation, 0 is infinite
--verbose whether logger use debug level
--workhome string Dfcache working directory --workhome string Dfcache working directory
-h, --help help for version -h, --help help for version
``` ```

View File

@ -32,7 +32,6 @@ and so on.
\-\-original\-offset Range request only. Download ranged data into target file with original offset. Daemon will make a hardlink to target file. Client can download many ranged data into one file for same url. When enabled, back source in client will be disabled \-\-original\-offset Range request only. Download ranged data into target file with original offset. Daemon will make a hardlink to target file. Client can download many ranged data into one file for same url. When enabled, back source in client will be disabled
\-O, \-\-output string Destination path which is used to store the downloaded file, it must be a full path \-O, \-\-output string Destination path which is used to store the downloaded file, it must be a full path
\-P, \-\-priority string Scheduler will schedule task according to priority \-P, \-\-priority string Scheduler will schedule task according to priority
\-\-pprof\-port int listen port for pprof(default \-1)
\-\-range string Download range. Like: 0\-9, stands download 10 bytes from 0 \-9, [0:9] in real url \-\-range string Download range. Like: 0\-9, stands download 10 bytes from 0 \-9, [0:9] in real url
\-\-ratelimit string The downloading network bandwidth limit per second in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will be parsed as Byte, 0 is infinite (default \(dq100.0MB\(dq) \-\-ratelimit string The downloading network bandwidth limit per second in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will be parsed as Byte, 0 is infinite (default \(dq100.0MB\(dq)
\-r, \-\-recursive Recursively download all resources in target url, the target source client must support list action \-r, \-\-recursive Recursively download all resources in target url, the target source client must support list action
@ -41,7 +40,6 @@ and so on.
\-\-tag string Different tags for the same url will be divided into different P2P overlay, it conflicts with \-\-digest \-\-tag string Different tags for the same url will be divided into different P2P overlay, it conflicts with \-\-digest
\-\-timeout duration Timeout for the downloading task, 0 is infinite \-\-timeout duration Timeout for the downloading task, 0 is infinite
\-u, \-\-url string Download one file from the url, equivalent to the command\(aqs first position argument \-u, \-\-url string Download one file from the url, equivalent to the command\(aqs first position argument
\-\-verbose whether logger use debug level
\-\-workhome string Dfget working directory \-\-workhome string Dfget working directory
.EE .EE
.SH BUGS .SH BUGS

View File

@ -29,7 +29,6 @@ functionality, such as network bandwidth limit, transmission encryption and so o
--original-offset Range request only. Download ranged data into target file with original offset. Daemon will make a hardlink to target file. Client can download many ranged data into one file for same url. When enabled, back source in client will be disabled --original-offset Range request only. Download ranged data into target file with original offset. Daemon will make a hardlink to target file. Client can download many ranged data into one file for same url. When enabled, back source in client will be disabled
-O, --output string Destination path which is used to store the downloaded file, it must be a full path -O, --output string Destination path which is used to store the downloaded file, it must be a full path
-P, --priority string Scheduler will schedule task according to priority -P, --priority string Scheduler will schedule task according to priority
--pprof-port int listen port for pprof(default -1)
--range string Download range. Like: 0-9, stands download 10 bytes from 0 -9, [0:9] in real url --range string Download range. Like: 0-9, stands download 10 bytes from 0 -9, [0:9] in real url
--ratelimit string The downloading network bandwidth limit per second in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will be parsed as Byte, 0 is infinite (default "100.0MB") --ratelimit string The downloading network bandwidth limit per second in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will be parsed as Byte, 0 is infinite (default "100.0MB")
-r, --recursive Recursively download all resources in target url, the target source client must support list action -r, --recursive Recursively download all resources in target url, the target source client must support list action
@ -38,7 +37,6 @@ functionality, such as network bandwidth limit, transmission encryption and so o
--tag string Different tags for the same url will be divided into different P2P overlay, it conflicts with --digest --tag string Different tags for the same url will be divided into different P2P overlay, it conflicts with --digest
--timeout duration Timeout for the downloading task, 0 is infinite --timeout duration Timeout for the downloading task, 0 is infinite
-u, --url string Download one file from the url, equivalent to the command's first position argument -u, --url string Download one file from the url, equivalent to the command's first position argument
--verbose whether logger use debug level
--workhome string Dfget working directory --workhome string Dfget working directory
``` ```

View File

@ -25,8 +25,6 @@ scheduler:
scheduleTimeout: 10s scheduleTimeout: 10s
# when enabled, pprof will be enabled, # when enabled, pprof will be enabled,
verbose: true
log-level: debug
console: false console: false
# current host info used for scheduler # current host info used for scheduler

View File

@ -52,6 +52,9 @@ type CacheOption struct {
// LogDir is log directory of dfcache. // LogDir is log directory of dfcache.
LogDir string `yaml:"logDir,omitempty" mapstructure:"logDir,omitempty"` LogDir string `yaml:"logDir,omitempty" mapstructure:"logDir,omitempty"`
// LogLevel is log level of dfcache, supported values are "debug", "info", "warn", "error", "panic", "fatal".
LogLevel string `yaml:"logLevel,omitempty" mapstructure:"logLevel,omitempty"`
// Maximum size in megabytes of log files before rotation (default: 1024) // Maximum size in megabytes of log files before rotation (default: 1024)
LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"` LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"`
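
The new field is a plain string under the `logLevel` yaml key, so existing dfcache/dfget configs pick it up without other changes. A hedged sketch of the shape; the struct below mirrors only the logging-related fields, while the real CacheOption has many more fields and also carries mapstructure tags for the config loader:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// cacheLogging mirrors only the logging-related fields touched by this
// commit; it is not the repository's CacheOption type.
type cacheLogging struct {
	LogDir     string `yaml:"logDir,omitempty"`
	LogLevel   string `yaml:"logLevel,omitempty"`
	LogMaxSize int    `yaml:"logMaxSize"`
}

func main() {
	// The old `verbose: true` key is gone; configs set logLevel directly.
	data := []byte("logDir: /var/log/dragonfly\nlogLevel: debug\nlogMaxSize: 1024\n")

	var opt cacheLogging
	if err := yaml.Unmarshal(data, &opt); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opt) // {LogDir:/var/log/dragonfly LogLevel:debug LogMaxSize:1024}
}
```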

View File

@ -100,6 +100,9 @@ type ClientOption struct {
// LogDir is log directory of dfget. // LogDir is log directory of dfget.
LogDir string `yaml:"logDir,omitempty" mapstructure:"logDir,omitempty"` LogDir string `yaml:"logDir,omitempty" mapstructure:"logDir,omitempty"`
// LogLevel is log level of dfget, supported values are "debug", "info", "warn", "error", "panic", "fatal".
LogLevel string `yaml:"logLevel,omitempty" mapstructure:"logLevel,omitempty"`
// Maximum size in megabytes of log files before rotation (default: 1024) // Maximum size in megabytes of log files before rotation (default: 1024)
LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"` LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"`

View File

@ -62,6 +62,7 @@ type DaemonOption struct {
CacheDir string `mapstructure:"cacheDir" yaml:"cacheDir"` CacheDir string `mapstructure:"cacheDir" yaml:"cacheDir"`
CacheDirMode uint32 `mapstructure:"cacheDirMode" yaml:"cacheDirMode"` CacheDirMode uint32 `mapstructure:"cacheDirMode" yaml:"cacheDirMode"`
LogDir string `mapstructure:"logDir" yaml:"logDir"` LogDir string `mapstructure:"logDir" yaml:"logDir"`
LogLevel string `mapstructure:"logLevel" yaml:"logLevel"`
LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"` LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"`
LogMaxAge int `yaml:"logMaxAge" mapstructure:"logMaxAge"` LogMaxAge int `yaml:"logMaxAge" mapstructure:"logMaxAge"`
LogMaxBackups int `yaml:"logMaxBackups" mapstructure:"logMaxBackups"` LogMaxBackups int `yaml:"logMaxBackups" mapstructure:"logMaxBackups"`

View File

@ -234,7 +234,6 @@ func TestPeerHostOption_Load(t *testing.T) {
peerHostOption := &DaemonOption{ peerHostOption := &DaemonOption{
Options: base.Options{ Options: base.Options{
Console: true, Console: true,
Verbose: true,
PProfPort: -1, PProfPort: -1,
Tracing: base.TracingConfig{ Tracing: base.TracingConfig{
Protocol: "grpc", Protocol: "grpc",

View File

@ -1,6 +1,5 @@
console: true console: true
verbose: true pprofPort: -1
pprof-port: -1
tracing: tracing:
protocol: "grpc" protocol: "grpc"
endpoint: "localhost:4317" endpoint: "localhost:4317"

View File

@ -147,9 +147,7 @@ func (o *objectStorage) Stop() error {
// Initialize router of gin. // Initialize router of gin.
func (o *objectStorage) initRouter(cfg *config.DaemonOption, logDir string) *gin.Engine { func (o *objectStorage) initRouter(cfg *config.DaemonOption, logDir string) *gin.Engine {
// Set mode. // Set mode.
if !cfg.Verbose {
gin.SetMode(gin.ReleaseMode) gin.SetMode(gin.ReleaseMode)
}
// Logging to a file. // Logging to a file.
if !cfg.Console { if !cfg.Console {
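
With the Verbose option gone, the daemon-side routers (object storage here, upload manager below) run gin in release mode unconditionally, while the manager's REST server later in this commit keys release mode off `logLevel == "info"`. A minimal standalone sketch of the daemon-side behaviour (route and handler are illustrative):

```go
package main

import "github.com/gin-gonic/gin"

// newRouter reflects the post-commit daemon behaviour: release mode is set
// unconditionally instead of being guarded by the removed Verbose flag.
func newRouter() *gin.Engine {
	gin.SetMode(gin.ReleaseMode)

	r := gin.New()
	r.GET("/healthy", func(c *gin.Context) { c.Status(200) })
	return r
}

func main() {
	_ = newRouter() // r.Run(":8080") would start serving; omitted here.
}
```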

View File

@ -148,9 +148,7 @@ func (um *uploadManager) Stop() error {
// Initialize router of gin. // Initialize router of gin.
func (um *uploadManager) initRouter(cfg *config.DaemonOption, logDir string) *gin.Engine { func (um *uploadManager) initRouter(cfg *config.DaemonOption, logDir string) *gin.Engine {
// Set mode // Set mode
if !cfg.Verbose {
gin.SetMode(gin.ReleaseMode) gin.SetMode(gin.ReleaseMode)
}
r := gin.New() r := gin.New()

View File

@ -18,9 +18,7 @@ package base
type Options struct { type Options struct {
Console bool `yaml:"console" mapstructure:"console"` Console bool `yaml:"console" mapstructure:"console"`
Verbose bool `yaml:"verbose" mapstructure:"verbose"` PProfPort int `yaml:"pprofPort" mapstructure:"pprofPort"`
LogLevel string `yaml:"log-level" mapstructure:"log-level"`
PProfPort int `yaml:"pprof-port" mapstructure:"pprof-port"`
Tracing TracingConfig `yaml:"tracing" mapstructure:"tracing"` Tracing TracingConfig `yaml:"tracing" mapstructure:"tracing"`
} }
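
base.Options now exposes `pprofPort` (yaml key renamed from `pprof-port`) independently of any verbosity setting; per the old flag help, -1 keeps pprof disabled and 0 asks the OS for a random port. A hedged sketch of how such a value can be consumed; the function name and wiring are assumptions, not code from this commit:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on the default mux
)

// startPProf starts a pprof listener only when port is non-negative;
// 0 lets the OS choose a free port. Illustrative only.
func startPProf(port int) {
	if port < 0 {
		return
	}
	go func() {
		addr := fmt.Sprintf("localhost:%d", port)
		log.Println(http.ListenAndServe(addr, nil))
	}()
}

func main() {
	startPProf(-1) // disabled, matching the default `pprofPort: -1`
}
```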

View File

@ -71,9 +71,6 @@ func InitCommandAndConfig(cmd *cobra.Command, useConfigFile bool, config any) {
// Add common flags // Add common flags
flags := cmd.PersistentFlags() flags := cmd.PersistentFlags()
flags.Bool("console", false, "whether logger output records to the stdout") flags.Bool("console", false, "whether logger output records to the stdout")
flags.Bool("verbose", false, "whether logger use debug level")
flags.String("log-level", "", "use specific log level(debug, info, warn, error), take precedence over verbose flag")
flags.Int("pprof-port", -1, "listen port for pprof, 0 represents random port")
flags.String("config", "", fmt.Sprintf("the path of configuration file with yaml extension name, default is %s, it can also be set by env var: %s", filepath.Join(dfpath.DefaultConfigDir, rootName+".yaml"), strings.ToUpper(rootName+"_config"))) flags.String("config", "", fmt.Sprintf("the path of configuration file with yaml extension name, default is %s, it can also be set by env var: %s", filepath.Join(dfpath.DefaultConfigDir, rootName+".yaml"), strings.ToUpper(rootName+"_config")))
// Bind common flags // Bind common flags

View File

@ -83,6 +83,7 @@ func init() {
flags.Duration("timeout", dfcacheConfig.Timeout, "Timeout for this cache operation, 0 is infinite") flags.Duration("timeout", dfcacheConfig.Timeout, "Timeout for this cache operation, 0 is infinite")
flags.String("workhome", dfcacheConfig.WorkHome, "Dfcache working directory") flags.String("workhome", dfcacheConfig.WorkHome, "Dfcache working directory")
flags.String("logdir", dfcacheConfig.LogDir, "Dfcache log directory") flags.String("logdir", dfcacheConfig.LogDir, "Dfcache log directory")
flags.String("logLevel", dfcacheConfig.LogLevel, "Dfcache log level, one of: debug, info, warn, error, fatal, panic")
flags.String("daemon-sock", dfcacheConfig.DaemonSock, "Dfdaemon socket path to connect") flags.String("daemon-sock", dfcacheConfig.DaemonSock, "Dfdaemon socket path to connect")
// Bind common flags // Bind common flags
@ -142,7 +143,7 @@ func runDfcacheSubcmd(ctx context.Context, cmdName string, args []string) error
MaxBackups: dfcacheConfig.LogMaxBackups} MaxBackups: dfcacheConfig.LogMaxBackups}
// Initialize logger // Initialize logger
if err := logger.InitDfcache(dfcacheConfig.Verbose, dfcacheConfig.LogLevel, d.LogDir(), rotateConfig); err != nil { if err := logger.InitDfcache(dfcacheConfig.LogLevel, d.LogDir(), rotateConfig); err != nil {
return fmt.Errorf("init client dfcache logger: %w", err) return fmt.Errorf("init client dfcache logger: %w", err)
} }
logger.Infof("version:\n%s", version.Version()) logger.Infof("version:\n%s", version.Version())

View File

@ -80,7 +80,7 @@ it supports container engine, wget and other downloading tools through proxy fun
MaxBackups: cfg.LogMaxBackups} MaxBackups: cfg.LogMaxBackups}
// Initialize logger // Initialize logger
if err := logger.InitDaemon(cfg.Verbose, cfg.LogLevel, cfg.Console, d.LogDir(), rotateConfig); err != nil { if err := logger.InitDaemon(cfg.LogLevel, cfg.Console, d.LogDir(), rotateConfig); err != nil {
return fmt.Errorf("init client daemon logger: %w", err) return fmt.Errorf("init client daemon logger: %w", err)
} }
logger.RedirectStdoutAndStderr(cfg.Console, path.Join(d.LogDir(), types.DaemonName)) logger.RedirectStdoutAndStderr(cfg.Console, path.Join(d.LogDir(), types.DaemonName))

View File

@ -94,7 +94,7 @@ var rootCmd = &cobra.Command{
MaxBackups: dfgetConfig.LogMaxBackups} MaxBackups: dfgetConfig.LogMaxBackups}
// Initialize logger // Initialize logger
if err := logger.InitDfget(dfgetConfig.Verbose, dfgetConfig.LogLevel, dfgetConfig.Console, d.LogDir(), rotateConfig); err != nil { if err := logger.InitDfget(dfgetConfig.LogLevel, dfgetConfig.Console, d.LogDir(), rotateConfig); err != nil {
return fmt.Errorf("init client dfget logger: %w", err) return fmt.Errorf("init client dfget logger: %w", err)
} }
@ -178,6 +178,8 @@ func init() {
flagSet.String("logdir", dfgetConfig.LogDir, "Dfget log directory") flagSet.String("logdir", dfgetConfig.LogDir, "Dfget log directory")
flagSet.String("logLevel", dfgetConfig.LogLevel, "Dfget log level, one of: debug, info, warn, error, fatal, panic")
flagSet.String("datadir", dfgetConfig.DataDir, "Dfget data directory") flagSet.String("datadir", dfgetConfig.DataDir, "Dfget data directory")
flagSet.String("cachedir", dfgetConfig.CacheDir, "Dfget cache directory") flagSet.String("cachedir", dfgetConfig.CacheDir, "Dfget cache directory")
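
The shared `--verbose`, `--log-level` and `--pprof-port` flags were dropped from InitCommandAndConfig earlier in this commit, and dfget/dfcache now register a per-command `--logLevel` string flag. A small sketch of the resulting flag surface; defaults here are illustrative, since the real commands bind the flags through their config structs:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	flags := flag.NewFlagSet("dfget", flag.ContinueOnError)
	flags.Bool("console", false, "whether logger output records to the stdout")
	logLevel := flags.String("logLevel", "info",
		"Dfget log level, one of: debug, info, warn, error, fatal, panic")

	if err := flags.Parse([]string{"--logLevel", "debug"}); err != nil {
		panic(err)
	}
	fmt.Println(*logLevel) // debug
}
```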

View File

@ -72,7 +72,7 @@ for managing schedulers and seed peers, offering http apis and portal, etc.`,
MaxBackups: cfg.Server.LogMaxBackups} MaxBackups: cfg.Server.LogMaxBackups}
// Initialize logger. // Initialize logger.
if err := logger.InitManager(cfg.Verbose, cfg.LogLevel, cfg.Console, d.LogDir(), rotateConfig); err != nil { if err := logger.InitManager(cfg.Server.LogLevel, cfg.Console, d.LogDir(), rotateConfig); err != nil {
return fmt.Errorf("init manager logger: %w", err) return fmt.Errorf("init manager logger: %w", err)
} }
logger.RedirectStdoutAndStderr(cfg.Console, path.Join(d.LogDir(), types.ManagerName)) logger.RedirectStdoutAndStderr(cfg.Console, path.Join(d.LogDir(), types.ManagerName))

View File

@ -72,7 +72,7 @@ generate and maintain a P2P network during the download process, and push suitab
MaxBackups: cfg.Server.LogMaxBackups} MaxBackups: cfg.Server.LogMaxBackups}
// Initialize logger. // Initialize logger.
if err := logger.InitScheduler(cfg.Verbose, cfg.LogLevel, cfg.Console, d.LogDir(), rotateConfig); err != nil { if err := logger.InitScheduler(cfg.Server.LogLevel, cfg.Console, d.LogDir(), rotateConfig); err != nil {
return fmt.Errorf("init scheduler logger: %w", err) return fmt.Errorf("init scheduler logger: %w", err)
} }
logger.RedirectStdoutAndStderr(cfg.Console, path.Join(d.LogDir(), types.SchedulerName)) logger.RedirectStdoutAndStderr(cfg.Console, path.Join(d.LogDir(), types.SchedulerName))

View File

@ -162,14 +162,8 @@ network:
# Console shows log on console. # Console shows log on console.
console: false console: false
# Whether to enable debug level logger and enable pprof. # Listen port for pprof, default is -1 (means disabled).
verbose: true pprofPort: -1
# Use specific log level(debug, info, warn, error), take precedence over verbose flag.
# log-level: debug
# Listen port for pprof, only valid when the verbose option is true, default is -1.
pprof-port: -1
tracing: tracing:
# Jaeger endpoint url, like: http://jaeger.dragonfly.svc:4317. # Jaeger endpoint url, like: http://jaeger.dragonfly.svc:4317.

View File

@ -178,14 +178,8 @@ network:
# Console shows log on console. # Console shows log on console.
console: false console: false
# Whether to enable debug level logger and enable pprof. # Listen port for pprof, default is -1 (means disabled).
verbose: true pprofPort: -1
# Use specific log level(debug, info, warn, error), take precedence over verbose flag.
# log-level: debug
# Listen port for pprof, only valid when the verbose option is true, default is -1.
pprof-port: -1
tracing: tracing:
# Jaeger endpoint url, like: http://jaeger.dragonfly.svc:4317. # Jaeger endpoint url, like: http://jaeger.dragonfly.svc:4317.

View File

@ -53,8 +53,7 @@ var customCoreLevel atomic.Bool
var grpcLevel = zap.NewAtomicLevelAt(zapcore.WarnLevel) var grpcLevel = zap.NewAtomicLevelAt(zapcore.WarnLevel)
var customGrpcLevel atomic.Bool var customGrpcLevel atomic.Bool
func CreateLogger(filePath string, compress bool, stats bool, verbose bool, logLevel string, config LogRotateConfig) (*zap.Logger, zap.AtomicLevel, error) { func CreateLogger(filePath string, compress bool, stats bool, logLevel string, config LogRotateConfig) (*zap.Logger, zap.AtomicLevel, error) {
rotateConfig := &lumberjack.Logger{ rotateConfig := &lumberjack.Logger{
Filename: filePath, Filename: filePath,
MaxSize: config.MaxSize, MaxSize: config.MaxSize,
@ -68,7 +67,6 @@ func CreateLogger(filePath string, compress bool, stats bool, verbose bool, logL
encoderConfig := zap.NewProductionEncoderConfig() encoderConfig := zap.NewProductionEncoderConfig()
encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout(encodeTimeFormat) encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout(encodeTimeFormat)
var level = zap.NewAtomicLevelAt(zap.InfoLevel) var level = zap.NewAtomicLevelAt(zap.InfoLevel)
// Use logLevel first, then fallback to verbose flag.
if logLevel != "" { if logLevel != "" {
switch strings.ToLower(logLevel) { switch strings.ToLower(logLevel) {
case "debug": case "debug":
@ -82,8 +80,6 @@ func CreateLogger(filePath string, compress bool, stats bool, verbose bool, logL
default: default:
fmt.Printf("Warning: invalid log level '%s', using 'info' instead\n", logLevel) fmt.Printf("Warning: invalid log level '%s', using 'info' instead\n", logLevel)
} }
} else if verbose {
level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
} }
if strings.HasSuffix(filePath, GrpcLogFileName) && customGrpcLevel.Load() { if strings.HasSuffix(filePath, GrpcLogFileName) && customGrpcLevel.Load() {
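
CreateLogger now derives the zap level purely from the logLevel string: empty means info, unknown values print a warning and fall back to info, and the old verbose branch is gone. A self-contained sketch of that mapping (helper name is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// parseLevel mirrors the switch CreateLogger uses after this commit.
func parseLevel(logLevel string) zap.AtomicLevel {
	level := zap.NewAtomicLevelAt(zapcore.InfoLevel)
	if logLevel != "" {
		switch strings.ToLower(logLevel) {
		case "debug":
			level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
		case "info":
			// already the default
		case "warn":
			level = zap.NewAtomicLevelAt(zapcore.WarnLevel)
		case "error":
			level = zap.NewAtomicLevelAt(zapcore.ErrorLevel)
		case "panic":
			level = zap.NewAtomicLevelAt(zapcore.PanicLevel)
		case "fatal":
			level = zap.NewAtomicLevelAt(zapcore.FatalLevel)
		default:
			fmt.Printf("Warning: invalid log level '%s', using 'info' instead\n", logLevel)
		}
	}
	return level
}

func main() {
	fmt.Println(parseLevel("debug").Level()) // debug
	fmt.Println(parseLevel("bogus").Level()) // info (after the warning)
}
```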

View File

@ -102,6 +102,7 @@ func SetGrpcLogger(log *zap.SugaredLogger) {
if vl, err := strconv.Atoi(vLevel); err == nil { if vl, err := strconv.Atoi(vLevel); err == nil {
v = vl v = vl
} }
grpclog.SetLoggerV2(&zapGrpc{GrpcLogger, v}) grpclog.SetLoggerV2(&zapGrpc{GrpcLogger, v})
} }

View File

@ -35,9 +35,9 @@ type logInitMeta struct {
setLoggerFunc func(log *zap.Logger) setLoggerFunc func(log *zap.Logger)
} }
func InitManager(verbose bool, logLevel string, console bool, dir string, rotateConfig LogRotateConfig) error { func InitManager(logLevel string, console bool, dir string, rotateConfig LogRotateConfig) error {
if console { if console {
return createConsoleLogger(verbose, logLevel) return createConsoleLogger(logLevel)
} }
logDir := filepath.Join(dir, types.ManagerName) logDir := filepath.Join(dir, types.ManagerName)
@ -64,12 +64,12 @@ func InitManager(verbose bool, logLevel string, console bool, dir string, rotate
}, },
} }
return createFileLogger(verbose, logLevel, meta, logDir, rotateConfig) return createFileLogger(logLevel, meta, logDir, rotateConfig)
} }
func InitScheduler(verbose bool, logLevel string, console bool, dir string, rotateConfig LogRotateConfig) error { func InitScheduler(logLevel string, console bool, dir string, rotateConfig LogRotateConfig) error {
if console { if console {
return createConsoleLogger(verbose, logLevel) return createConsoleLogger(logLevel)
} }
logDir := filepath.Join(dir, types.SchedulerName) logDir := filepath.Join(dir, types.SchedulerName)
@ -92,12 +92,12 @@ func InitScheduler(verbose bool, logLevel string, console bool, dir string, rota
}, },
} }
return createFileLogger(verbose, logLevel, meta, logDir, rotateConfig) return createFileLogger(logLevel, meta, logDir, rotateConfig)
} }
func InitDaemon(verbose bool, logLevel string, console bool, dir string, rotateConfig LogRotateConfig) error { func InitDaemon(logLevel string, console bool, dir string, rotateConfig LogRotateConfig) error {
if console { if console {
return createConsoleLogger(verbose, logLevel) return createConsoleLogger(logLevel)
} }
logDir := filepath.Join(dir, types.DaemonName) logDir := filepath.Join(dir, types.DaemonName)
@ -120,12 +120,12 @@ func InitDaemon(verbose bool, logLevel string, console bool, dir string, rotateC
}, },
} }
return createFileLogger(verbose, logLevel, meta, logDir, rotateConfig) return createFileLogger(logLevel, meta, logDir, rotateConfig)
} }
func InitDfget(verbose bool, logLevel string, console bool, dir string, rotateConfig LogRotateConfig) error { func InitDfget(logLevel string, console bool, dir string, rotateConfig LogRotateConfig) error {
if console { if console {
return createConsoleLogger(verbose, logLevel) return createConsoleLogger(logLevel)
} }
logDir := filepath.Join(dir, types.DfgetName) logDir := filepath.Join(dir, types.DfgetName)
@ -140,10 +140,10 @@ func InitDfget(verbose bool, logLevel string, console bool, dir string, rotateCo
}, },
} }
return createFileLogger(verbose, logLevel, meta, logDir, rotateConfig) return createFileLogger(logLevel, meta, logDir, rotateConfig)
} }
func InitDfcache(verbose bool, logLevel string, dir string, rotateConfig LogRotateConfig) error { func InitDfcache(logLevel string, dir string, rotateConfig LogRotateConfig) error {
logDir := filepath.Join(dir, types.DfcacheName) logDir := filepath.Join(dir, types.DfcacheName)
var meta = []logInitMeta{ var meta = []logInitMeta{
{ {
@ -156,14 +156,13 @@ func InitDfcache(verbose bool, logLevel string, dir string, rotateConfig LogRota
}, },
} }
return createFileLogger(verbose, logLevel, meta, logDir, rotateConfig) return createFileLogger(logLevel, meta, logDir, rotateConfig)
} }
func createConsoleLogger(verbose bool, logLevel string) error { func createConsoleLogger(logLevel string) error {
levels = nil levels = nil
config := zap.NewDevelopmentConfig() config := zap.NewDevelopmentConfig()
config.Level = zap.NewAtomicLevelAt(zap.InfoLevel) config.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
// Use logLevel first, then fallback to verbose flag.
if logLevel != "" { if logLevel != "" {
switch strings.ToLower(logLevel) { switch strings.ToLower(logLevel) {
case "debug": case "debug":
@ -177,8 +176,6 @@ func createConsoleLogger(verbose bool, logLevel string) error {
default: default:
fmt.Printf("Warning: invalid log level '%s', using 'info' instead\n", logLevel) fmt.Printf("Warning: invalid log level '%s', using 'info' instead\n", logLevel)
} }
} else if verbose {
config.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
} }
log, err := config.Build(zap.AddCaller(), zap.AddStacktrace(zap.WarnLevel), zap.AddCallerSkip(1)) log, err := config.Build(zap.AddCaller(), zap.AddStacktrace(zap.WarnLevel), zap.AddCallerSkip(1))
@ -199,13 +196,13 @@ func createConsoleLogger(verbose bool, logLevel string) error {
return nil return nil
} }
func createFileLogger(verbose bool, logLevel string, meta []logInitMeta, logDir string, rotateConfig LogRotateConfig) error { func createFileLogger(logLevel string, meta []logInitMeta, logDir string, rotateConfig LogRotateConfig) error {
levels = nil levels = nil
// create parent dir first // create parent dir first
_ = os.MkdirAll(logDir, fs.FileMode(0700)) _ = os.MkdirAll(logDir, fs.FileMode(0700))
for _, m := range meta { for _, m := range meta {
log, level, err := CreateLogger(path.Join(logDir, m.fileName), false, false, verbose, logLevel, rotateConfig) log, level, err := CreateLogger(path.Join(logDir, m.fileName), false, false, logLevel, rotateConfig)
if err != nil { if err != nil {
return err return err
} }
@ -217,6 +214,7 @@ func createFileLogger(verbose bool, logLevel string, meta []logInitMeta, logDir
levels = append(levels, level) levels = append(levels, level)
} }
startLoggerSignalHandler() startLoggerSignalHandler()
return nil return nil
} }

View File

@ -68,6 +68,9 @@ type ServerConfig struct {
// Server log directory. // Server log directory.
LogDir string `yaml:"logDir" mapstructure:"logDir"` LogDir string `yaml:"logDir" mapstructure:"logDir"`
// LogLevel is log level of server, supported values are "debug", "info", "warn", "error", "panic", "fatal".
LogLevel string `yaml:"logLevel" mapstructure:"logLevel"`
// Maximum size in megabytes of log files before rotation (default: 1024) // Maximum size in megabytes of log files before rotation (default: 1024)
LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"` LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"`
@ -403,7 +406,6 @@ func New() *Config {
return &Config{ return &Config{
Options: base.Options{ Options: base.Options{
Console: false, Console: false,
Verbose: false,
PProfPort: -1, PProfPort: -1,
Tracing: base.TracingConfig{ Tracing: base.TracingConfig{
ServiceName: types.ManagerName, ServiceName: types.ManagerName,
@ -420,6 +422,7 @@ func New() *Config {
REST: RESTConfig{ REST: RESTConfig{
Addr: DefaultRESTAddr, Addr: DefaultRESTAddr,
}, },
LogLevel: "info",
LogMaxSize: DefaultLogRotateMaxSize, LogMaxSize: DefaultLogRotateMaxSize,
LogMaxAge: DefaultLogRotateMaxAge, LogMaxAge: DefaultLogRotateMaxAge,
LogMaxBackups: DefaultLogRotateMaxBackups, LogMaxBackups: DefaultLogRotateMaxBackups,

View File

@ -97,6 +97,7 @@ func TestConfig_Load(t *testing.T) {
Name: "foo", Name: "foo",
CacheDir: "foo", CacheDir: "foo",
LogDir: "foo", LogDir: "foo",
LogLevel: "debug",
LogMaxSize: 512, LogMaxSize: 512,
LogMaxAge: 5, LogMaxAge: 5,
LogMaxBackups: 3, LogMaxBackups: 3,

View File

@ -2,6 +2,7 @@ server:
name: foo name: foo
cacheDir: foo cacheDir: foo
logDir: foo logDir: foo
logLevel: debug
logMaxSize: 512 logMaxSize: 512
logMaxAge: 5 logMaxAge: 5
logMaxBackups: 3 logMaxBackups: 3

View File

@ -54,7 +54,7 @@ func newMysql(cfg *config.Config) (*gorm.DB, error) {
// Initialize gorm logger. // Initialize gorm logger.
logLevel := gormlogger.Info logLevel := gormlogger.Info
if !cfg.Verbose { if cfg.Server.LogLevel != "info" {
logLevel = gormlogger.Warn logLevel = gormlogger.Warn
} }
gormLogger := zapgorm2.New(logger.CoreLogger.Desugar()).LogMode(logLevel) gormLogger := zapgorm2.New(logger.CoreLogger.Desugar()).LogMode(logLevel)
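
The manager's mysql setup replaces the Verbose check with a comparison against the configured level: only an explicit "info" keeps gorm's Info logging, anything else drops gorm's own logger to Warn. A tiny sketch of that mapping (helper name is illustrative):

```go
package main

import (
	"fmt"

	gormlogger "gorm.io/gorm/logger"
)

// gormLevel mirrors the post-commit check in newMysql.
func gormLevel(logLevel string) gormlogger.LogLevel {
	if logLevel != "info" {
		return gormlogger.Warn
	}
	return gormlogger.Info
}

func main() {
	fmt.Println(gormLevel("info"))  // 4 (gormlogger.Info)
	fmt.Println(gormLevel("debug")) // 3 (gormlogger.Warn)
}
```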

View File

@ -44,7 +44,7 @@ import (
func Init(cfg *config.Config, logDir string, service service.Service, database *database.Database, enforcer *casbin.Enforcer, func Init(cfg *config.Config, logDir string, service service.Service, database *database.Database, enforcer *casbin.Enforcer,
limiter ratelimiter.JobRateLimiter, assets static.ServeFileSystem) (*gin.Engine, error) { limiter ratelimiter.JobRateLimiter, assets static.ServeFileSystem) (*gin.Engine, error) {
// Set mode. // Set mode.
if !cfg.Verbose { if cfg.Server.LogLevel == "info" {
gin.SetMode(gin.ReleaseMode) gin.SetMode(gin.ReleaseMode)
} }

View File

@ -34,7 +34,7 @@ func SetGrpcLevel(level zapcore.Level) {
} }
// SetupDaemon sets daemon log config: path, console // SetupDaemon sets daemon log config: path, console
func SetupDaemon(logDir string, verbose bool, logLevel string, console bool, rotateConfig logger.LogRotateConfig) error { func SetupDaemon(logDir string, logLevel string, console bool, rotateConfig logger.LogRotateConfig) error {
var options []dfpath.Option var options []dfpath.Option
if logDir != "" { if logDir != "" {
options = append(options, dfpath.WithLogDir(logDir)) options = append(options, dfpath.WithLogDir(logDir))
@ -45,5 +45,5 @@ func SetupDaemon(logDir string, verbose bool, logLevel string, console bool, rot
return err return err
} }
return logger.InitDaemon(verbose, logLevel, console, d.LogDir(), rotateConfig) return logger.InitDaemon(logLevel, console, d.LogDir(), rotateConfig)
} }

View File

@ -91,6 +91,9 @@ type ServerConfig struct {
// Server log directory. // Server log directory.
LogDir string `yaml:"logDir" mapstructure:"logDir"` LogDir string `yaml:"logDir" mapstructure:"logDir"`
// LogLevel is log level of server, supported values are "debug", "info", "warn", "error", "panic", "fatal".
LogLevel string `yaml:"logLevel" mapstructure:"logLevel"`
// Maximum size in megabytes of log files before rotation (default: 1024) // Maximum size in megabytes of log files before rotation (default: 1024)
LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"` LogMaxSize int `yaml:"logMaxSize" mapstructure:"logMaxSize"`
@ -318,7 +321,6 @@ func New() *Config {
return &Config{ return &Config{
Options: base.Options{ Options: base.Options{
Console: false, Console: false,
Verbose: false,
PProfPort: -1, PProfPort: -1,
Tracing: base.TracingConfig{ Tracing: base.TracingConfig{
ServiceName: types.SchedulerName, ServiceName: types.SchedulerName,
@ -328,6 +330,7 @@ func New() *Config {
Port: DefaultServerPort, Port: DefaultServerPort,
AdvertisePort: DefaultServerAdvertisePort, AdvertisePort: DefaultServerAdvertisePort,
Host: fqdn.FQDNHostname, Host: fqdn.FQDNHostname,
LogLevel: "info",
LogMaxSize: DefaultLogRotateMaxSize, LogMaxSize: DefaultLogRotateMaxSize,
LogMaxAge: DefaultLogRotateMaxAge, LogMaxAge: DefaultLogRotateMaxAge,
LogMaxBackups: DefaultLogRotateMaxBackups, LogMaxBackups: DefaultLogRotateMaxBackups,

View File

@ -87,6 +87,7 @@ func TestConfig_Load(t *testing.T) {
}, },
CacheDir: "foo", CacheDir: "foo",
LogDir: "foo", LogDir: "foo",
LogLevel: "debug",
LogMaxSize: 512, LogMaxSize: 512,
LogMaxAge: 5, LogMaxAge: 5,
LogMaxBackups: 3, LogMaxBackups: 3,

View File

@ -12,6 +12,7 @@ server:
logDir: foo logDir: foo
pluginDir: foo pluginDir: foo
dataDir: foo dataDir: foo
logLevel: debug
logMaxSize: 512 logMaxSize: 512
logMaxAge: 5 logMaxAge: 5
logMaxBackups: 3 logMaxBackups: 3

View File

@ -25,7 +25,8 @@ manager:
enable: true enable: true
config: config:
console: false console: false
verbose: true server:
logLevel: debug
job: job:
rateLimit: rateLimit:
fillInterval: 1m fillInterval: 1m
@ -63,7 +64,8 @@ scheduler:
enableHost: true enableHost: true
config: config:
console: false console: false
verbose: true server:
logLevel: debug
scheduler: scheduler:
algorithm: default algorithm: default
retryBackToSourceLimit: 7 retryBackToSourceLimit: 7

View File

@ -25,7 +25,8 @@ manager:
enable: true enable: true
config: config:
console: false console: false
verbose: true server:
logLevel: debug
job: job:
rateLimit: rateLimit:
fillInterval: 1m fillInterval: 1m
@ -63,7 +64,8 @@ scheduler:
enableHost: true enableHost: true
config: config:
console: false console: false
verbose: true server:
logLevel: debug
scheduler: scheduler:
algorithm: default algorithm: default
retryBackToSourceLimit: 7 retryBackToSourceLimit: 7