feat: add seed peer logic (#1302)
* feat: announce seed peer
* feat: remove cdn logic
* feat: remove cdn job
* feat: dfdaemon change host uuid to host id
* feat: go generate mocks
* feat: remove cdn compatibility
* feat: change docker compose
* fix: reuse panic
* feat: compatible with v2.0.3-beta.2

Signed-off-by: Gaius <gaius.qi@gmail.com>
This commit is contained in:
parent 26cd8f0e22
commit faa5e4e465
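For orientation before the diffs: this commit replaces the dedicated CDN role with a "seed peer" that dfdaemon itself can announce to the manager. A minimal, hypothetical sketch of switching a dfdaemon into seed peer mode with the new option structs added in this change follows; the import paths and the exact package layout are assumptions based on the repository layout, not part of this diff.

package main

import (
	"fmt"
	"time"

	"d7y.io/dragonfly/v2/client/config"
	"d7y.io/dragonfly/v2/manager/model"
)

func main() {
	// Hypothetical manager option with seed peer mode switched on.
	// Field names follow the SeedPeerOption/KeepAliveOption structs added below.
	opt := config.ManagerOption{
		Enable:          true,
		RefreshInterval: 5 * time.Minute,
		SeedPeer: config.SeedPeerOption{
			Enable:    true,
			Type:      model.SeedPeerTypeSuperSeed, // "super"; "strong" and "weak" also appear in this diff
			ClusterID: 1,
			KeepAlive: config.KeepAliveOption{
				Interval: 5 * time.Second,
			},
		},
	}
	fmt.Printf("%+v\n", opt)
}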
@@ -15,7 +15,7 @@ env:
   KIND_VERSION: v0.11.1
   CONTAINERD_VERSION: v1.5.2
   KIND_CONFIG_PATH: test/testdata/kind/config.yaml
-  DRAGONFLY_STABLE_IMAGE_TAG: v2.0.2
+  DRAGONFLY_STABLE_IMAGE_TAG: v2.0.3-beta.2
   DRAGONFLY_CHARTS_PATH: deploy/helm-charts/charts/dragonfly
   DRAGONFLY_CHARTS_CONFIG_PATH: test/testdata/charts/config.yaml
   DRAGONFLY_FILE_SERVER_PATH: test/testdata/k8s/file-server.yaml
|
|
@@ -28,7 +28,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        module: ["manager", "scheduler", "cdn", "dfdaemon"]
+        module: ["manager", "scheduler", "dfdaemon"]
     steps:
       - name: Checkout code
         uses: actions/checkout@v2
|
|
@@ -4,9 +4,12 @@ run:

 linters-settings:
   gocyclo:
-    min-complexity: 42
+    min-complexity: 44
   gci:
-    local-prefixes: d7y.io/dragonfly/v2
+    sections:
+      - standard
+      - default
+      - prefix(d7y.io/dragonfly/v2)

 issues:
   new: true
|
||||||
1017  api/manager/docs.go
File diff suppressed because it is too large
@@ -4,12 +4,12 @@ definitions:
     properties:
       bio:
         type: string
-      cdn_clusters:
-        items:
-          $ref: '#/definitions/model.CDNCluster'
-        type: array
+      created_at:
+        type: string
       download_rate_limit:
         type: integer
+      id:
+        type: integer
       name:
         type: string
       scheduler_clusters:
@@ -22,6 +22,8 @@ definitions:
         type: array
       state:
         type: string
+      updated_at:
+        type: string
       url:
         type: string
       user:
@@ -29,80 +31,18 @@ definitions:
       user_id:
         type: integer
     type: object
-  model.Assertion:
-    properties:
-      key:
-        type: string
-      policy:
-        items:
-          items:
-            type: string
-          type: array
-        type: array
-      policyMap:
-        additionalProperties:
-          type: integer
-        type: object
-      rm: {}
-      tokens:
-        items:
-          type: string
-        type: array
-      value:
-        type: string
-    type: object
-  model.AssertionMap:
-    additionalProperties:
-      $ref: '#/definitions/model.Assertion'
-    type: object
-  model.CDN:
-    properties:
-      cdnclusterID:
-        type: integer
-      download_port:
-        type: integer
-      host_name:
-        type: string
-      idc:
-        type: string
-      ip:
-        type: string
-      location:
-        type: string
-      port:
-        type: integer
-      state:
-        type: string
-    type: object
-  model.CDNCluster:
-    properties:
-      application_id:
-        type: integer
-      bio:
-        type: string
-      config:
-        $ref: '#/definitions/model.JSONMap'
-      is_default:
-        type: boolean
-      jobs:
-        items:
-          $ref: '#/definitions/model.Job'
-        type: array
-      name:
-        type: string
-      scheduler_clusters:
-        items:
-          $ref: '#/definitions/model.SchedulerCluster'
-        type: array
-      security_group_id:
-        type: integer
-    type: object
   model.Config:
     properties:
       bio:
         type: string
+      created_at:
+        type: string
+      id:
+        type: integer
       name:
         type: string
+      updated_at:
+        type: string
       user_id:
         type: integer
       value:
|
|
@@ -117,10 +57,10 @@ definitions:
         $ref: '#/definitions/model.JSONMap'
       bio:
         type: string
-      cdn_clusters:
-        items:
-          $ref: '#/definitions/model.CDNCluster'
-        type: array
+      created_at:
+        type: string
+      id:
+        type: integer
       result:
         $ref: '#/definitions/model.JSONMap'
       scheduler_clusters:
@@ -137,6 +77,8 @@ definitions:
         type: string
       type:
         type: string
+      updated_at:
+        type: string
       user_id:
         type: integer
     type: object
@@ -148,15 +90,25 @@ definitions:
         type: string
       client_secret:
         type: string
+      created_at:
+        type: string
+      id:
+        type: integer
       name:
         type: string
       redirect_url:
         type: string
+      updated_at:
+        type: string
     type: object
   model.Scheduler:
     properties:
+      created_at:
+        type: string
       host_name:
         type: string
+      id:
+        type: integer
       idc:
         type: string
       ip:
@@ -171,6 +123,8 @@ definitions:
         type: integer
       state:
         type: string
+      updated_at:
+        type: string
     type: object
   model.SchedulerCluster:
     properties:
@@ -178,14 +132,14 @@ definitions:
         type: integer
       bio:
         type: string
-      cdn_clusters:
-        items:
-          $ref: '#/definitions/model.CDNCluster'
-        type: array
       client_config:
         $ref: '#/definitions/model.JSONMap'
       config:
         $ref: '#/definitions/model.JSONMap'
+      created_at:
+        type: string
+      id:
+        type: integer
       is_default:
         type: boolean
       jobs:
@@ -202,24 +156,36 @@ definitions:
         items:
           $ref: '#/definitions/model.SeedPeerCluster'
         type: array
+      updated_at:
+        type: string
     type: object
   model.SecurityGroup:
     properties:
       bio:
         type: string
+      created_at:
+        type: string
+      id:
+        type: integer
       name:
         type: string
       security_rules:
         items:
           $ref: '#/definitions/model.SecurityRule'
         type: array
+      updated_at:
+        type: string
     type: object
   model.SecurityRule:
     properties:
       bio:
         type: string
+      created_at:
+        type: string
      domain:
        type: string
+      id:
+        type: integer
      name:
        type: string
      proxy_domain:
|
|
@@ -228,17 +194,25 @@ definitions:
         items:
           $ref: '#/definitions/model.SecurityGroup'
         type: array
+      updated_at:
+        type: string
     type: object
   model.SeedPeer:
     properties:
+      created_at:
+        type: string
       download_port:
         type: integer
       host_name:
         type: string
+      id:
+        type: integer
       idc:
         type: string
       ip:
         type: string
+      is_cdn:
+        type: boolean
       location:
         type: string
       net_topology:
@@ -251,6 +225,8 @@ definitions:
         type: string
       type:
         type: string
+      updated_at:
+        type: string
     type: object
   model.SeedPeerCluster:
     properties:
@@ -260,6 +236,10 @@ definitions:
         type: string
       config:
         $ref: '#/definitions/model.JSONMap'
+      created_at:
+        type: string
+      id:
+        type: integer
       is_default:
         type: boolean
       jobs:
@@ -276,6 +256,8 @@ definitions:
         $ref: '#/definitions/model.JSONMap'
       security_group_id:
         type: integer
+      updated_at:
+        type: string
     type: object
   model.User:
     properties:
@@ -283,8 +265,12 @@ definitions:
         type: string
       bio:
         type: string
+      created_at:
+        type: string
       email:
         type: string
+      id:
+        type: integer
       location:
         type: string
       name:
@@ -293,6 +279,8 @@ definitions:
         type: string
       state:
         type: string
+      updated_at:
+        type: string
     type: object
   rbac.Permission:
     properties:
|
|
@@ -320,15 +308,6 @@ definitions:
     - action
     - object
     type: object
-  types.CDNClusterConfig:
-    properties:
-      load_limit:
-        maximum: 5000
-        minimum: 1
-        type: integer
-      net_topology:
-        type: string
-    type: object
   types.CreateApplicationRequest:
     properties:
       bio:
@@ -350,43 +329,6 @@ definitions:
     - name
     - user_id
     type: object
-  types.CreateCDNClusterRequest:
-    properties:
-      bio:
-        type: string
-      config:
-        $ref: '#/definitions/types.CDNClusterConfig'
-      is_default:
-        type: boolean
-      name:
-        type: string
-    required:
-    - config
-    - name
-    type: object
-  types.CreateCDNRequest:
-    properties:
-      cdn_cluster_id:
-        type: integer
-      download_port:
-        type: integer
-      host_name:
-        type: string
-      idc:
-        type: string
-      ip:
-        type: string
-      location:
-        type: string
-      port:
-        type: integer
-    required:
-    - cdn_cluster_id
-    - download_port
-    - host_name
-    - ip
-    - port
-    type: object
   types.CreateConfigRequest:
     properties:
       bio:
@@ -409,10 +351,6 @@ definitions:
         type: object
       bio:
         type: string
-      cdn_cluster_ids:
-        items:
-          type: integer
-        type: array
       result:
         additionalProperties: true
         type: object
@@ -467,8 +405,6 @@ definitions:
     properties:
       bio:
         type: string
-      cdn_cluster_id:
-        type: integer
       client_config:
         $ref: '#/definitions/types.SchedulerClusterClientConfig'
       config:
@@ -566,6 +502,10 @@ definitions:
       seed_peer_cluster_id:
         type: integer
       type:
+        enum:
+        - super
+        - strong
+        - weak
         type: string
     required:
     - download_port
|
|
@@ -725,32 +665,6 @@ definitions:
     required:
     - user_id
     type: object
-  types.UpdateCDNClusterRequest:
-    properties:
-      bio:
-        type: string
-      config:
-        $ref: '#/definitions/types.CDNClusterConfig'
-      is_default:
-        type: boolean
-      name:
-        type: string
-    type: object
-  types.UpdateCDNRequest:
-    properties:
-      cdn_cluster_id:
-        type: integer
-      download_port:
-        type: integer
-      idc:
-        type: string
-      ip:
-        type: string
-      location:
-        type: string
-      port:
-        type: integer
-    type: object
   types.UpdateConfigRequest:
     properties:
       bio:
@@ -789,8 +703,6 @@ definitions:
     properties:
       bio:
         type: string
-      cdn_cluster_id:
-        type: integer
       client_config:
         $ref: '#/definitions/types.SchedulerClusterClientConfig'
       config:
@@ -869,6 +781,10 @@ definitions:
       seed_peer_cluster_id:
         type: integer
       type:
+        enum:
+        - super
+        - strong
+        - weak
         type: string
     type: object
   types.UpdateUserRequest:
|
|
@@ -1042,65 +958,6 @@ paths:
       summary: Update Application
       tags:
       - Application
-  /applications/{id}/cdn-clusters/{cdn_cluster_id}:
-    delete:
-      consumes:
-      - application/json
-      description: Delete CDN to Application
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      - description: cdn cluster id
-        in: path
-        name: cdn_cluster_id
-        required: true
-        type: string
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: ""
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Delete CDN to Application
-      tags:
-      - Application
-    put:
-      consumes:
-      - application/json
-      description: Add CDN to Application
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      - description: cdn cluster id
-        in: path
-        name: cdn_cluster_id
-        required: true
-        type: string
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: ""
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Add CDN to Application
-      tags:
-      - Application
   /applications/{id}/scheduler-clusters/{scheduler_cluster_id}:
     delete:
       consumes:
|
|
@@ -1219,362 +1076,6 @@ paths:
       summary: Add SeedPeer to Application
       tags:
       - Application
-  /cdn-clusters:
-    get:
-      consumes:
-      - application/json
-      description: Get CDNClusters
-      parameters:
-      - default: 0
-        description: current page
-        in: query
-        name: page
-        required: true
-        type: integer
-      - default: 10
-        description: return max item count, default 10, max 50
-        in: query
-        maximum: 50
-        minimum: 2
-        name: per_page
-        required: true
-        type: integer
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: OK
-          schema:
-            items:
-              $ref: '#/definitions/model.CDNCluster'
-            type: array
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Get CDNClusters
-      tags:
-      - CDNCluster
-    post:
-      consumes:
-      - application/json
-      description: create by json config
-      parameters:
-      - description: DNCluster
-        in: body
-        name: CDNCluster
-        required: true
-        schema:
-          $ref: '#/definitions/types.CreateCDNClusterRequest'
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: OK
-          schema:
-            $ref: '#/definitions/model.CDNCluster'
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Create CDNCluster
-      tags:
-      - CDNCluster
-  /cdn-clusters/{id}:
-    delete:
-      consumes:
-      - application/json
-      description: Destroy by id
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: ""
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Destroy CDNCluster
-      tags:
-      - CDNCluster
-    get:
-      consumes:
-      - application/json
-      description: Get CDNCluster by id
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: OK
-          schema:
-            $ref: '#/definitions/model.CDNCluster'
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Get CDNCluster
-      tags:
-      - CDNCluster
-    patch:
-      consumes:
-      - application/json
-      description: Update by json config
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      - description: CDNCluster
-        in: body
-        name: CDNCluster
-        required: true
-        schema:
-          $ref: '#/definitions/types.UpdateCDNClusterRequest'
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: OK
-          schema:
-            $ref: '#/definitions/model.CDNCluster'
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Update CDNCluster
-      tags:
-      - CDNCluster
-  /cdn-clusters/{id}/cdns/{cdn_id}:
-    put:
-      consumes:
-      - application/json
-      description: Add CDN to CDNCluster
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      - description: cdn id
-        in: path
-        name: cdn_id
-        required: true
-        type: string
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: ""
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Add Instance to CDNCluster
-      tags:
-      - CDNCluster
-  /cdn-clusters/{id}/scheduler-clusters/{scheduler_cluster_id}:
-    put:
-      consumes:
-      - application/json
-      description: Add SchedulerCluster to CDNCluster
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      - description: scheduler cluster id
-        in: path
-        name: scheduler_cluster_id
-        required: true
-        type: string
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: ""
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Add SchedulerCluster to CDNCluster
-      tags:
-      - CDNCluster
-  /cdns:
-    get:
-      consumes:
-      - application/json
-      description: Get CDNs
-      parameters:
-      - default: 0
-        description: current page
-        in: query
-        name: page
-        required: true
-        type: integer
-      - default: 10
-        description: return max item count, default 10, max 50
-        in: query
-        maximum: 50
-        minimum: 2
-        name: per_page
-        required: true
-        type: integer
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: OK
-          schema:
-            items:
-              $ref: '#/definitions/model.CDN'
-            type: array
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Get CDNs
-      tags:
-      - CDN
-    post:
-      consumes:
-      - application/json
-      description: create by json config
-      parameters:
-      - description: CDN
-        in: body
-        name: CDN
-        required: true
-        schema:
-          $ref: '#/definitions/types.CreateCDNRequest'
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: OK
-          schema:
-            $ref: '#/definitions/model.CDN'
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Create CDN
-      tags:
-      - CDN
-  /cdns/{id}:
-    delete:
-      consumes:
-      - application/json
-      description: Destroy by id
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: ""
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Destroy CDN
-      tags:
-      - CDN
-    get:
-      consumes:
-      - application/json
-      description: Get CDN by id
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: OK
-          schema:
-            $ref: '#/definitions/model.CDN'
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Get CDN
-      tags:
-      - CDN
-    patch:
-      consumes:
-      - application/json
-      description: Update by json config
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      - description: CDN
-        in: body
-        name: CDN
-        required: true
-        schema:
-          $ref: '#/definitions/types.UpdateCDNRequest'
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: OK
-          schema:
-            $ref: '#/definitions/model.CDN'
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Update CDN
-      tags:
-      - CDN
   /configs:
     get:
       consumes:
|
|
@@ -2704,36 +2205,6 @@ paths:
       summary: Update SecurityGroup
       tags:
      - SecurityGroup
-  /security-groups/{id}/cdn-clusters/{cdn_cluster_id}:
-    put:
-      consumes:
-      - application/json
-      description: Add CDN to SecurityGroup
-      parameters:
-      - description: id
-        in: path
-        name: id
-        required: true
-        type: string
-      - description: cdn cluster id
-        in: path
-        name: cdn_cluster_id
-        required: true
-        type: string
-      produces:
-      - application/json
-      responses:
-        "200":
-          description: ""
-        "400":
-          description: ""
-        "404":
-          description: ""
-        "500":
-          description: ""
-      summary: Add CDN to SecurityGroup
-      tags:
-      - SecurityGroup
   /security-groups/{id}/scheduler-clusters/{scheduler_cluster_id}:
     put:
       consumes:
|
||||||
26  cdn/cdn.go
|
|
@@ -37,6 +37,7 @@ import (
 	"d7y.io/dragonfly/v2/cdn/supervisor/task"
 	"d7y.io/dragonfly/v2/client/daemon/upload"
 	logger "d7y.io/dragonfly/v2/internal/dflog"
+	"d7y.io/dragonfly/v2/manager/model"
 	"d7y.io/dragonfly/v2/pkg/rpc/manager"
 	managerClient "d7y.io/dragonfly/v2/pkg/rpc/manager/client"
 	"d7y.io/dragonfly/v2/pkg/util/hostutils"
|
|
@@ -158,15 +159,18 @@ func (s *Server) Serve() error {
 	go func() {
 		if s.configServer != nil {
 			var rpcServerConfig = s.grpcServer.GetConfig()
-			CDNInstance, err := s.configServer.UpdateCDN(&manager.UpdateCDNRequest{
-				SourceType:   manager.SourceType_CDN_SOURCE,
-				HostName:     hostutils.FQDNHostname,
-				Ip:           rpcServerConfig.AdvertiseIP,
-				Port:         int32(rpcServerConfig.ListenPort),
-				DownloadPort: int32(rpcServerConfig.DownloadPort),
-				Idc:          s.config.Host.IDC,
-				Location:     s.config.Host.Location,
-				CdnClusterId: uint64(s.config.Manager.CDNClusterID),
+			CDNInstance, err := s.configServer.UpdateSeedPeer(&manager.UpdateSeedPeerRequest{
+				SourceType:        manager.SourceType_SEED_PEER_SOURCE,
+				HostName:          hostutils.FQDNHostname,
+				Type:              model.SeedPeerTypeSuperSeed,
+				IsCdn:             true,
+				Idc:               s.config.Host.IDC,
+				NetTopology:       s.config.Host.NetTopology,
+				Location:          s.config.Host.Location,
+				Ip:                rpcServerConfig.AdvertiseIP,
+				Port:              int32(rpcServerConfig.ListenPort),
+				DownloadPort:      int32(rpcServerConfig.DownloadPort),
+				SeedPeerClusterId: uint64(s.config.Manager.SeedPeerClusterID),
 			})
 			if err != nil {
 				logger.Fatalf("update cdn instance failed: %v", err)
|
|
@@ -175,8 +179,8 @@ func (s *Server) Serve() error {
 			logger.Infof("====starting keepalive cdn instance %s to manager %s====", CDNInstance, s.config.Manager.Addr)
 			s.configServer.KeepAlive(s.config.Manager.KeepAlive.Interval, &manager.KeepAliveRequest{
 				HostName:   hostutils.FQDNHostname,
-				SourceType: manager.SourceType_CDN_SOURCE,
-				ClusterId:  uint64(s.config.Manager.CDNClusterID),
+				SourceType: manager.SourceType_SEED_PEER_SOURCE,
+				ClusterId:  uint64(s.config.Manager.SeedPeerClusterID),
 			})
 		}
 	}()
|
|
@@ -39,8 +39,8 @@ func New() *Config {
 		Task: task.DefaultConfig(),
 		CDN:  cdn.DefaultConfig(),
 		Manager: ManagerConfig{
 			Addr:              "",
-			CDNClusterID:      0,
+			SeedPeerClusterID: 0,
 			KeepAlive: KeepAliveConfig{
 				Interval: 5 * time.Second,
 			},
|
|
@@ -93,26 +93,27 @@ type ManagerConfig struct {
 	// NetAddr is manager address.
 	Addr string `yaml:"addr" mapstructure:"addr"`

-	// CDNClusterID is cdn cluster id.
-	CDNClusterID uint `yaml:"cdnClusterID" mapstructure:"cdnClusterID"`
+	// SeedPeerClusterID is seed peer cluster id.
+	SeedPeerClusterID uint `yaml:"seedPeerClusterID" mapstructure:"seedPeerClusterID"`

-	// KeepAlive configuration
+	// KeepAlive configuration.
 	KeepAlive KeepAliveConfig `yaml:"keepAlive" mapstructure:"keepAlive"`
 }

 func (c ManagerConfig) Validate() []error {
 	var errors []error
 	if c.Addr != "" {
-		if c.CDNClusterID <= 0 {
-			errors = append(errors, fmt.Errorf("cdn cluster id %d can't be a negative number", c.CDNClusterID))
+		if c.SeedPeerClusterID <= 0 {
+			errors = append(errors, fmt.Errorf("seed peer cluster id %d can't be a negative number", c.SeedPeerClusterID))
 		}

 		errors = append(errors, c.KeepAlive.Validate()...)
 	}
 	return errors
 }

 type KeepAliveConfig struct {
-	// Keep alive interval
+	// Keep alive interval.
 	Interval time.Duration `yaml:"interval" mapstructure:"interval"`
 }
|
|
@@ -125,9 +126,12 @@ func (c KeepAliveConfig) Validate() []error {
 }

 type HostConfig struct {
-	// Location for scheduler
-	Location string `mapstructure:"location" yaml:"location"`
-
-	// IDC for scheduler
+	// CDN idc.
 	IDC string `mapstructure:"idc" yaml:"idc"`
+
+	// CDN network topology.
+	NetTopology string `mapstructure:"netTopology" yaml:"netTopology"`
+
+	// CDN location.
+	Location string `mapstructure:"location" yaml:"location"`
 }
|
|
@@ -60,15 +60,16 @@ func TestConfig_Convert(t *testing.T) {
 			LogDir:   "aaa",
 			WorkHome: "/workHome",
 			Manager: ManagerConfig{
 				Addr:              "127.0.0.1:8004",
-				CDNClusterID:      5,
+				SeedPeerClusterID: 5,
 				KeepAlive: KeepAliveConfig{
 					Interval: 50 * time.Second,
 				},
 			},
 			Host: HostConfig{
-				Location: "beijing",
-				IDC:      "na61",
+				IDC:         "na61",
+				NetTopology: "t1",
+				Location:    "beijing",
 			},
 			Metrics: &RestConfig{
 				Addr: ":8081",
|
|
@@ -155,15 +156,16 @@ func TestConfig_Convert(t *testing.T) {
 				WriterRoutineLimit: 4,
 			},
 			Manager: ManagerConfig{
 				Addr:              "127.0.0.1:8004",
-				CDNClusterID:      5,
+				SeedPeerClusterID: 5,
 				KeepAlive: KeepAliveConfig{
 					Interval: 50 * time.Second,
 				},
 			},
 			Host: HostConfig{
-				Location: "beijing",
-				IDC:      "na61",
+				IDC:         "na61",
+				NetTopology: "t1",
+				Location:    "beijing",
 			},
 			LogDir:   "aaa",
 			WorkHome: "/workHome",
|
|
@@ -84,8 +84,8 @@ func (css *Server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest,
 	hostID := idgen.CDNHostID(hostutils.FQDNHostname, int32(css.config.ListenPort))
 	// begin piece
 	psc <- &cdnsystem.PieceSeed{
 		PeerId:   peerID,
-		HostUuid: hostID,
+		HostId:   hostID,
 		PieceInfo: &base.PieceInfo{
 			PieceNum: common.BeginOfPiece,
 		},
@@ -105,8 +105,8 @@ func (css *Server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest,
 	}
 	for piece := range pieceChan {
 		pieceSeed := &cdnsystem.PieceSeed{
 			PeerId:   peerID,
-			HostUuid: hostID,
+			HostId:   hostID,
 			PieceInfo: &base.PieceInfo{
 				PieceNum:   int32(piece.PieceNum),
 				RangeStart: piece.PieceRange.StartIndex,
@@ -148,7 +148,7 @@ func (css *Server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest,
 	}
 	pieceSeed := &cdnsystem.PieceSeed{
 		PeerId:   peerID,
-		HostUuid: hostID,
+		HostId:   hostID,
 		Done:            true,
 		ContentLength:   seedTask.SourceFileLength,
 		TotalPieceCount: seedTask.TotalPieceCount,
|
|
@@ -183,7 +183,7 @@ func newManagerClient(client managerclient.Client, hostOption HostOption) intern

 func (mc *managerClient) Get() (interface{}, error) {
 	schedulers, err := mc.ListSchedulers(&manager.ListSchedulersRequest{
-		SourceType: manager.SourceType_CLIENT_SOURCE,
+		SourceType: manager.SourceType_PEER_SOURCE,
 		HostName:   mc.hostOption.Hostname,
 		Ip:         mc.hostOption.AdvertiseIP,
 		HostInfo: map[string]string{
|
|
@@ -91,21 +91,6 @@ func (mr *MockClientMockRecorder) ListSchedulers(arg0 interface{}) *gomock.Call
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockClient)(nil).ListSchedulers), arg0)
 }

-// UpdateCDN mocks base method.
-func (m *MockClient) UpdateCDN(arg0 *manager.UpdateCDNRequest) (*manager.CDN, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "UpdateCDN", arg0)
-	ret0, _ := ret[0].(*manager.CDN)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// UpdateCDN indicates an expected call of UpdateCDN.
-func (mr *MockClientMockRecorder) UpdateCDN(arg0 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockClient)(nil).UpdateCDN), arg0)
-}
-
 // UpdateScheduler mocks base method.
 func (m *MockClient) UpdateScheduler(arg0 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
 	m.ctrl.T.Helper()
|
|
@@ -152,34 +152,52 @@ func ConvertPattern(p string, defaultPattern scheduler.Pattern) scheduler.Patter
 }

 type SchedulerOption struct {
-	// Manager is to get the scheduler configuration remotely
+	// Manager is to get the scheduler configuration remotely.
 	Manager ManagerOption `mapstructure:"manager" yaml:"manager"`
 	// NetAddrs is scheduler addresses.
 	NetAddrs []dfnet.NetAddr `mapstructure:"netAddrs" yaml:"netAddrs"`
 	// ScheduleTimeout is request timeout.
 	ScheduleTimeout clientutil.Duration `mapstructure:"scheduleTimeout" yaml:"scheduleTimeout"`
-	// DisableAutoBackSource indicates not back source normally, only scheduler says back source
+	// DisableAutoBackSource indicates not back source normally, only scheduler says back source.
 	DisableAutoBackSource bool `mapstructure:"disableAutoBackSource" yaml:"disableAutoBackSource"`
 }

 type ManagerOption struct {
-	// Enable get configuration from manager
+	// Enable get configuration from manager.
 	Enable bool `mapstructure:"enable" yaml:"enable"`
 	// NetAddrs is manager addresses.
 	NetAddrs []dfnet.NetAddr `mapstructure:"netAddrs" yaml:"netAddrs"`
-	// RefreshInterval is the refresh interval
+	// RefreshInterval is the refresh interval.
 	RefreshInterval time.Duration `mapstructure:"refreshInterval" yaml:"refreshInterval"`
+	// SeedPeer configuration.
+	SeedPeer SeedPeerOption `mapstructure:"seedPeer" yaml:"seedPeer"`
+}
+
+type SeedPeerOption struct {
+	// Enable seed peer mode.
+	Enable bool `mapstructure:"enable" yaml:"enable"`
+	// Type is seed peer type.
+	Type string `mapstructure:"type" yaml:"type"`
+	// ClusterID is seed peer cluster id.
+	ClusterID uint `mapstructure:"clusterID" yaml:"clusterID"`
+	// KeepAlive configuration.
+	KeepAlive KeepAliveOption `yaml:"keepAlive" mapstructure:"keepAlive"`
+}
+
+type KeepAliveOption struct {
+	// Keep alive interval.
+	Interval time.Duration `yaml:"interval" mapstructure:"interval"`
 }

 type HostOption struct {
 	// SecurityDomain is the security domain
 	SecurityDomain string `mapstructure:"securityDomain" yaml:"securityDomain"`
-	// Location for scheduler
-	Location string `mapstructure:"location" yaml:"location"`
 	// IDC for scheduler
 	IDC string `mapstructure:"idc" yaml:"idc"`
 	// Peerhost net topology for scheduler
 	NetTopology string `mapstructure:"netTopology" yaml:"netTopology"`
+	// Location for scheduler
+	Location string `mapstructure:"location" yaml:"location"`
 	// Hostname is daemon host name
 	Hostname string `mapstructure:"hostname" yaml:"hostname"`
 	// The listen ip for all tcp services of daemon
|
|
@@ -26,6 +26,7 @@ import (
 	"golang.org/x/time/rate"

 	"d7y.io/dragonfly/v2/client/clientutil"
+	"d7y.io/dragonfly/v2/manager/model"
 	"d7y.io/dragonfly/v2/pkg/dfnet"
 	"d7y.io/dragonfly/v2/pkg/util/hostutils"
 	"d7y.io/dragonfly/v2/pkg/util/net/iputils"
@@ -39,6 +40,14 @@ var peerHostConfig = DaemonOption{
 		Manager: ManagerOption{
 			Enable:          false,
 			RefreshInterval: 5 * time.Minute,
+			SeedPeer: SeedPeerOption{
+				Enable:    false,
+				Type:      model.SeedPeerTypeSuperSeed,
+				ClusterID: 1,
+				KeepAlive: KeepAliveOption{
+					Interval: 5 * time.Second,
+				},
+			},
 		},
 		NetAddrs: []dfnet.NetAddr{
 			{
|
|
@@ -26,6 +26,7 @@ import (
 	"golang.org/x/time/rate"

 	"d7y.io/dragonfly/v2/client/clientutil"
+	"d7y.io/dragonfly/v2/manager/model"
 	"d7y.io/dragonfly/v2/pkg/dfnet"
 	"d7y.io/dragonfly/v2/pkg/util/hostutils"
 	"d7y.io/dragonfly/v2/pkg/util/net/iputils"
@@ -39,6 +40,14 @@ var peerHostConfig = DaemonOption{
 		Manager: ManagerOption{
 			Enable:          false,
 			RefreshInterval: 5 * time.Minute,
+			SeedPeer: SeedPeerOption{
+				Enable:    false,
+				Type:      model.SeedPeerTypeSuperSeed,
+				ClusterID: 1,
+				KeepAlive: KeepAliveOption{
+					Interval: 5 * time.Second,
+				},
+			},
 		},
 		NetAddrs: []dfnet.NetAddr{
 			{
|
|
@@ -26,6 +26,7 @@ import (
 	"gopkg.in/yaml.v3"

 	"d7y.io/dragonfly/v2/client/clientutil"
+	"d7y.io/dragonfly/v2/manager/model"
 	"d7y.io/dragonfly/v2/pkg/dfnet"
 	"d7y.io/dragonfly/v2/pkg/unit"
 )
@@ -239,6 +240,14 @@ func TestPeerHostOption_Load(t *testing.T) {
 				},
 			},
 			RefreshInterval: 5 * time.Minute,
+			SeedPeer: SeedPeerOption{
+				Enable:    false,
+				Type:      model.SeedPeerTypeStrongSeed,
+				ClusterID: 2,
+				KeepAlive: KeepAliveOption{
+					Interval: 10 * time.Second,
+				},
+			},
 		},
 		NetAddrs: []dfnet.NetAddr{
 			{
|
|
@@ -12,6 +12,12 @@ scheduler:
     - type: tcp
       addr: 127.0.0.1:65003
     refreshInterval: 5m
+    seedPeer:
+      enable: false
+      type: strong
+      clusterID: 2
+      keepAlive:
+        interval: 10s
   netAddrs:
   - type: tcp
     addr: 127.0.0.1:8002
|
|
@@ -89,6 +89,7 @@ type clientDaemon struct {
 	dynconfig  config.Dynconfig
 	dfpath     dfpath.Dfpath
 	schedulers      []*manager.Scheduler
+	managerClient   managerclient.Client
 	schedulerClient schedulerclient.SchedulerClient
 }

@@ -97,7 +98,7 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
 	source.UpdatePluginDir(d.PluginDir())

 	host := &scheduler.PeerHost{
-		Uuid:     idgen.UUIDString(),
+		Id:       idgen.HostID(opt.Host.Hostname, int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start)),
 		Ip:       opt.Host.AdvertiseIP,
 		RpcPort:  int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
 		DownPort: 0,
|
|
@@ -112,23 +113,27 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
 		addrs      []dfnet.NetAddr
 		schedulers []*manager.Scheduler
 		dynconfig  config.Dynconfig
+		managerClient  managerclient.Client
 		defaultPattern = config.ConvertPattern(opt.Download.DefaultPattern, scheduler.Pattern_P2P)
 	)

 	if opt.Scheduler.Manager.Enable == true {
 		// New manager client
-		managerClient, err := managerclient.NewWithAddrs(opt.Scheduler.Manager.NetAddrs)
+		var err error
+		managerClient, err = managerclient.NewWithAddrs(opt.Scheduler.Manager.NetAddrs)
 		if err != nil {
 			return nil, err
 		}

 		// New dynconfig client
-		if dynconfig, err = config.NewDynconfig(managerClient, d.CacheDir(), opt.Host, opt.Scheduler.Manager.RefreshInterval); err != nil {
+		dynconfig, err = config.NewDynconfig(managerClient, d.CacheDir(), opt.Host, opt.Scheduler.Manager.RefreshInterval)
+		if err != nil {
 			return nil, err
 		}

 		// Get schedulers from manager
-		if schedulers, err = dynconfig.GetSchedulers(); err != nil {
+		schedulers, err = dynconfig.GetSchedulers()
+		if err != nil {
 			return nil, err
 		}
@@ -229,6 +234,7 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
 		dynconfig:       dynconfig,
 		dfpath:          d,
 		schedulers:      schedulers,
+		managerClient:   managerClient,
 		schedulerClient: sched,
 	}, nil
 }
|
|
@@ -452,6 +458,24 @@ func (cd *clientDaemon) Serve() error {
 		return nil
 	})

+	// enable seed peer mode
+	if cd.managerClient != nil && cd.Option.Scheduler.Manager.SeedPeer.Enable {
+		logger.Info("announce to manager")
+		if err := cd.announceSeedPeer(); err != nil {
+			return err
+		}
+
+		g.Go(func() error {
+			logger.Info("keepalive to manager")
+			cd.managerClient.KeepAlive(cd.Option.Scheduler.Manager.SeedPeer.KeepAlive.Interval, &manager.KeepAliveRequest{
+				SourceType: manager.SourceType_SEED_PEER_SOURCE,
+				HostName:   cd.Option.Host.Hostname,
+				ClusterId:  uint64(cd.Option.Scheduler.Manager.SeedPeer.ClusterID),
+			})
+			return err
+		})
+	}
+
 	if cd.Option.AliveTime.Duration > 0 {
 		g.Go(func() error {
 			for {
|
|
@@ -565,6 +589,13 @@ func (cd *clientDaemon) Stop() {
 			}
 			logger.Info("dynconfig client closed")
 		}
+
+		if cd.managerClient != nil {
+			if err := cd.managerClient.Close(); err != nil {
+				logger.Errorf("manager client failed to stop: %s", err.Error())
+			}
+			logger.Info("manager client closed")
+		}
 	})
 }
|
|
@@ -586,7 +617,7 @@ func (cd *clientDaemon) OnNotify(data *config.DynconfigData) {
 	logger.Infof("scheduler addresses have been updated: %#v", addrs)
 }

-// getSchedulerIPs get ips by schedulers.
+// getSchedulerIPs gets ips by schedulers.
 func getSchedulerIPs(schedulers []*manager.Scheduler) []string {
 	ips := []string{}
 	for _, scheduler := range schedulers {
|
|
@@ -635,6 +666,26 @@ func schedulersToAvailableNetAddrs(schedulers []*manager.Scheduler) []dfnet.NetAddr
 	return netAddrs
 }

+// announceSeedPeer announces seed peer to manager.
+func (cd *clientDaemon) announceSeedPeer() error {
+	if _, err := cd.managerClient.UpdateSeedPeer(&manager.UpdateSeedPeerRequest{
+		SourceType:        manager.SourceType_SEED_PEER_SOURCE,
+		HostName:          cd.Option.Host.Hostname,
+		Type:              cd.Option.Scheduler.Manager.SeedPeer.Type,
+		Idc:               cd.Option.Host.IDC,
+		NetTopology:       cd.Option.Host.NetTopology,
+		Location:          cd.Option.Host.Location,
+		Ip:                cd.Option.Host.AdvertiseIP,
+		Port:              cd.schedPeerHost.RpcPort,
+		DownloadPort:      cd.schedPeerHost.DownPort,
+		SeedPeerClusterId: uint64(cd.Option.Scheduler.Manager.SeedPeer.ClusterID),
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func (cd *clientDaemon) ExportTaskManager() peer.TaskManager {
 	return cd.PeerTaskManager
 }
|
|
@@ -171,7 +171,7 @@ func (ptm *peerTaskManager) newPeerTaskConductor(
 	// use a new context with span info
 	ctx = trace.ContextWithSpan(context.Background(), trace.SpanFromContext(ctx))
 	ctx, span := tracer.Start(ctx, config.SpanPeerTask, trace.WithSpanKind(trace.SpanKindClient))
-	span.SetAttributes(config.AttributePeerHost.String(ptm.host.Uuid))
+	span.SetAttributes(config.AttributePeerHost.String(ptm.host.Id))
 	span.SetAttributes(semconv.NetHostIPKey.String(ptm.host.Ip))
 	span.SetAttributes(config.AttributePeerID.String(request.PeerId))
 	span.SetAttributes(semconv.HTTPURLKey.String(request.Url))
|
|
@@ -88,7 +88,7 @@ func (ptm *peerTaskManager) tryReuseFilePeerTask(ctx context.Context,
 	}

 	_, span := tracer.Start(ctx, config.SpanReusePeerTask, trace.WithSpanKind(trace.SpanKindClient))
-	span.SetAttributes(config.AttributePeerHost.String(ptm.host.Uuid))
+	span.SetAttributes(config.AttributePeerHost.String(ptm.host.Id))
 	span.SetAttributes(semconv.NetHostIPKey.String(ptm.host.Ip))
 	span.SetAttributes(config.AttributeTaskID.String(taskID))
 	span.SetAttributes(config.AttributePeerID.String(request.PeerId))
@@ -220,7 +220,7 @@ func (ptm *peerTaskManager) tryReuseStreamPeerTask(ctx context.Context,
 	}

 	ctx, span := tracer.Start(ctx, config.SpanStreamTask, trace.WithSpanKind(trace.SpanKindClient))
-	span.SetAttributes(config.AttributePeerHost.String(ptm.host.Uuid))
+	span.SetAttributes(config.AttributePeerHost.String(ptm.host.Id))
 	span.SetAttributes(semconv.NetHostIPKey.String(ptm.host.Ip))
 	span.SetAttributes(config.AttributeTaskID.String(taskID))
 	span.SetAttributes(config.AttributePeerID.String(request.PeerID))
|
|
@ -280,9 +280,11 @@ func (ptm *peerTaskManager) tryReuseSeedPeerTask(ctx context.Context,
|
||||||
}
|
}
|
||||||
|
|
||||||
if reuse == nil {
|
if reuse == nil {
|
||||||
if request.Range == nil {
|
return nil, false
|
||||||
return nil, false
|
|
||||||
}
|
// if request.Range == nil {
|
||||||
|
// return nil, false
|
||||||
|
// }
|
||||||
// TODO, mock SeedTaskResponse for sub task
|
// TODO, mock SeedTaskResponse for sub task
|
||||||
// for ranged request, check the parent task
|
// for ranged request, check the parent task
|
||||||
//reuseRange = request.Range
|
//reuseRange = request.Range
|
||||||
|
|
@ -304,7 +306,7 @@ func (ptm *peerTaskManager) tryReuseSeedPeerTask(ctx context.Context,
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, span := tracer.Start(ctx, config.SpanReusePeerTask, trace.WithSpanKind(trace.SpanKindClient))
|
ctx, span := tracer.Start(ctx, config.SpanReusePeerTask, trace.WithSpanKind(trace.SpanKindClient))
|
||||||
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Uuid))
|
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Id))
|
||||||
span.SetAttributes(semconv.NetHostIPKey.String(ptm.host.Ip))
|
span.SetAttributes(semconv.NetHostIPKey.String(ptm.host.Ip))
|
||||||
span.SetAttributes(config.AttributeTaskID.String(taskID))
|
span.SetAttributes(config.AttributeTaskID.String(taskID))
|
||||||
span.SetAttributes(config.AttributePeerID.String(request.PeerId))
|
span.SetAttributes(config.AttributePeerID.String(request.PeerId))
|
||||||
|
|
|
||||||
|
|
@ -267,7 +267,7 @@ func (proxy *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
defer metrics.ProxyRequestRunningCount.WithLabelValues(r.Method).Sub(1)
|
defer metrics.ProxyRequestRunningCount.WithLabelValues(r.Method).Sub(1)
|
||||||
|
|
||||||
ctx, span := proxy.tracer.Start(r.Context(), config.SpanProxy)
|
ctx, span := proxy.tracer.Start(r.Context(), config.SpanProxy)
|
||||||
span.SetAttributes(config.AttributePeerHost.String(proxy.peerHost.Uuid))
|
span.SetAttributes(config.AttributePeerHost.String(proxy.peerHost.Id))
|
||||||
span.SetAttributes(semconv.NetHostIPKey.String(proxy.peerHost.Ip))
|
span.SetAttributes(semconv.NetHostIPKey.String(proxy.peerHost.Ip))
|
||||||
span.SetAttributes(semconv.HTTPSchemeKey.String(r.URL.Scheme))
|
span.SetAttributes(semconv.HTTPSchemeKey.String(r.URL.Scheme))
|
||||||
span.SetAttributes(semconv.HTTPHostKey.String(r.Host))
|
span.SetAttributes(semconv.HTTPHostKey.String(r.Host))
|
||||||
|
|
|
||||||
|
|
@ -59,7 +59,7 @@ func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdn
|
||||||
PeerTaskRequest: scheduler.PeerTaskRequest{
|
PeerTaskRequest: scheduler.PeerTaskRequest{
|
||||||
Url: seedRequest.Url,
|
Url: seedRequest.Url,
|
||||||
UrlMeta: seedRequest.UrlMeta,
|
UrlMeta: seedRequest.UrlMeta,
|
||||||
PeerId: idgen.PeerID(s.server.peerHost.Ip),
|
PeerId: idgen.SeedPeerID(s.server.peerHost.Ip),
|
||||||
PeerHost: s.server.peerHost,
|
PeerHost: s.server.peerHost,
|
||||||
HostLoad: nil,
|
HostLoad: nil,
|
||||||
IsMigrating: false,
|
IsMigrating: false,
|
||||||
|
|
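The seeder now generates its peer ID with idgen.SeedPeerID instead of idgen.PeerID, so seed-peer IDs are distinguishable from ordinary peer IDs. The actual idgen implementation is not part of this diff; the sketch below only illustrates the idea of deriving an ID from the host IP and appending a seed-specific suffix, and every detail of the format is an assumption.

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

// peerID derives an ID from the host IP plus a timestamp and a UUID.
// Format is illustrative; the real idgen package may differ.
func peerID(ip string) string {
	return fmt.Sprintf("%s-%d-%s", ip, time.Now().UnixNano(), uuid.NewString())
}

// seedPeerID marks the ID so seed peers can be told apart from normal peers.
func seedPeerID(ip string) string {
	return peerID(ip) + "_Seed"
}
```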
@ -94,8 +94,8 @@ func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdn
|
||||||
|
|
||||||
err = seedsServer.Send(
|
err = seedsServer.Send(
|
||||||
&cdnsystem.PieceSeed{
|
&cdnsystem.PieceSeed{
|
||||||
PeerId: req.PeerId,
|
PeerId: req.PeerId,
|
||||||
HostUuid: req.PeerHost.Uuid,
|
HostId: req.PeerHost.Id,
|
||||||
PieceInfo: &base.PieceInfo{
|
PieceInfo: &base.PieceInfo{
|
||||||
PieceNum: common.BeginOfPiece,
|
PieceNum: common.BeginOfPiece,
|
||||||
},
|
},
|
||||||
|
|
@ -248,8 +248,8 @@ func (s *seedSynchronizer) sendOrderedPieceSeeds(desired, orderedNum int32, fini
|
||||||
|
|
||||||
func (s *seedSynchronizer) compositePieceSeed(pp *base.PiecePacket, piece *base.PieceInfo) cdnsystem.PieceSeed {
|
func (s *seedSynchronizer) compositePieceSeed(pp *base.PiecePacket, piece *base.PieceInfo) cdnsystem.PieceSeed {
|
||||||
return cdnsystem.PieceSeed{
|
return cdnsystem.PieceSeed{
|
||||||
PeerId: s.seedTaskRequest.PeerId,
|
PeerId: s.seedTaskRequest.PeerId,
|
||||||
HostUuid: s.seedTaskRequest.PeerHost.Uuid,
|
HostId: s.seedTaskRequest.PeerHost.Id,
|
||||||
PieceInfo: &base.PieceInfo{
|
PieceInfo: &base.PieceInfo{
|
||||||
PieceNum: piece.PieceNum,
|
PieceNum: piece.PieceNum,
|
||||||
RangeStart: piece.RangeStart,
|
RangeStart: piece.RangeStart,
|
||||||
|
|
|
||||||
|
|
@ -369,12 +369,11 @@ func setupSeederServerAndClient(t *testing.T, srv *server, sd *seeder, assert *t
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
client, err := cdnclient.GetClientByAddr([]dfnet.NetAddr{
|
client := cdnclient.GetClientByAddr([]dfnet.NetAddr{
|
||||||
{
|
{
|
||||||
Type: dfnet.TCP,
|
Type: dfnet.TCP,
|
||||||
Addr: fmt.Sprintf(":%d", port),
|
Addr: fmt.Sprintf(":%d", port),
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
assert.Nil(err, "grpc dial should be ok")
|
|
||||||
return port, client
|
return port, client
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,4 @@
|
||||||
cdn.json
|
|
||||||
cdn.yaml
|
|
||||||
dfget.yaml
|
dfget.yaml
|
||||||
manager.yaml
|
manager.yaml
|
||||||
scheduler.yaml
|
scheduler.yaml
|
||||||
|
seed-peer.yaml
|
||||||
|
|
|
||||||
|
|
@ -1,51 +0,0 @@
|
||||||
worker_rlimit_nofile 100000;
|
|
||||||
|
|
||||||
events {
|
|
||||||
use epoll;
|
|
||||||
worker_connections 20480;
|
|
||||||
}
|
|
||||||
|
|
||||||
http {
|
|
||||||
include mime.types;
|
|
||||||
default_type application/octet-stream;
|
|
||||||
root /home/admin/cai/htdocs;
|
|
||||||
sendfile on;
|
|
||||||
tcp_nopush on;
|
|
||||||
|
|
||||||
server_tokens off;
|
|
||||||
keepalive_timeout 5;
|
|
||||||
|
|
||||||
client_header_timeout 1m;
|
|
||||||
send_timeout 1m;
|
|
||||||
client_max_body_size 3m;
|
|
||||||
|
|
||||||
index index.html index.htm;
|
|
||||||
access_log off;
|
|
||||||
log_not_found off;
|
|
||||||
|
|
||||||
gzip on;
|
|
||||||
gzip_http_version 1.0;
|
|
||||||
gzip_comp_level 6;
|
|
||||||
gzip_min_length 1024;
|
|
||||||
gzip_proxied any;
|
|
||||||
gzip_vary on;
|
|
||||||
gzip_disable msie6;
|
|
||||||
gzip_buffers 96 8k;
|
|
||||||
gzip_types text/xml text/plain text/css application/javascript application/x-javascript application/rss+xml application/json;
|
|
||||||
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
|
||||||
proxy_set_header Web-Server-Type nginx;
|
|
||||||
proxy_set_header WL-Proxy-Client-IP $remote_addr;
|
|
||||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
|
||||||
proxy_redirect off;
|
|
||||||
proxy_buffers 128 8k;
|
|
||||||
proxy_intercept_errors on;
|
|
||||||
|
|
||||||
server {
|
|
||||||
listen 8001;
|
|
||||||
location / {
|
|
||||||
root /tmp/cdn;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -28,7 +28,7 @@ services:
|
||||||
retries: 30
|
retries: 30
|
||||||
|
|
||||||
manager:
|
manager:
|
||||||
image: dragonflyoss/manager:v2.0.2
|
image: dragonflyoss/manager:v2.0.3-beta.1
|
||||||
container_name: manager
|
container_name: manager
|
||||||
network_mode: host
|
network_mode: host
|
||||||
depends_on:
|
depends_on:
|
||||||
|
|
@ -45,10 +45,11 @@ services:
|
||||||
retries: 30
|
retries: 30
|
||||||
|
|
||||||
dfdaemon:
|
dfdaemon:
|
||||||
image: dragonflyoss/dfdaemon:v2.0.2
|
image: dragonflyoss/dfdaemon:v2.0.3-beta.1
|
||||||
depends_on:
|
depends_on:
|
||||||
|
- manager
|
||||||
- scheduler
|
- scheduler
|
||||||
- cdn
|
- seed-peer
|
||||||
container_name: dfdaemon
|
container_name: dfdaemon
|
||||||
network_mode: host
|
network_mode: host
|
||||||
restart: always
|
restart: always
|
||||||
|
|
@ -62,9 +63,8 @@ services:
|
||||||
- ./config/dfget.yaml:/etc/dragonfly/dfget.yaml:ro
|
- ./config/dfget.yaml:/etc/dragonfly/dfget.yaml:ro
|
||||||
|
|
||||||
scheduler:
|
scheduler:
|
||||||
image: dragonflyoss/scheduler:v2.0.2
|
image: dragonflyoss/scheduler:v2.0.3-beta.1
|
||||||
depends_on:
|
depends_on:
|
||||||
- cdn
|
|
||||||
- manager
|
- manager
|
||||||
container_name: scheduler
|
container_name: scheduler
|
||||||
network_mode: host
|
network_mode: host
|
||||||
|
|
@ -78,17 +78,19 @@ services:
|
||||||
- /tmp/log/dragonfly:/var/log/dragonfly
|
- /tmp/log/dragonfly:/var/log/dragonfly
|
||||||
- ./config/scheduler.yaml:/etc/dragonfly/scheduler.yaml:ro
|
- ./config/scheduler.yaml:/etc/dragonfly/scheduler.yaml:ro
|
||||||
|
|
||||||
cdn:
|
seed-peer:
|
||||||
image: dragonflyoss/cdn:v2.0.2
|
image: dragonflyoss/dfdaemon:v2.0.3-beta.1
|
||||||
container_name: cdn
|
depends_on:
|
||||||
|
- manager
|
||||||
|
- scheduler
|
||||||
|
container_name: seed-peer
|
||||||
network_mode: host
|
network_mode: host
|
||||||
restart: always
|
restart: always
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD-SHELL", "/bin/grpc_health_probe -addr=:8003 || exit 1"]
|
test: ["CMD-SHELL", "/bin/grpc_health_probe -addr=:65100 || exit 1"]
|
||||||
interval: 1s
|
interval: 1s
|
||||||
timeout: 3s
|
timeout: 3s
|
||||||
retries: 30
|
retries: 30
|
||||||
volumes:
|
volumes:
|
||||||
- /tmp/log/dragonfly:/var/log/dragonfly
|
- /tmp/log/dragonfly:/var/log/dragonfly
|
||||||
- ./config/cdn.yaml:/etc/dragonfly/cdn.yaml:ro
|
- ./config/seed-peer.yaml:/etc/dragonfly/dfget.yaml:ro
|
||||||
- ./config/nginx.conf:/etc/nginx/nginx.conf:ro
|
|
||||||
|
|
|
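Note that the seed-peer service reuses the dfdaemon image and its health check now probes the peer gRPC port 65100 rather than the CDN's 8003. For reference, the snippet below is a rough Go equivalent of what `grpc_health_probe -addr=:65100` does, assuming the daemon exposes the standard gRPC health service on that port as the compose file implies.

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Dial the seed peer's gRPC port used by the compose health check.
	conn, err := grpc.DialContext(ctx, "localhost:65100", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("dial seed peer: %v", err)
	}
	defer conn.Close()

	// Issue a standard health check, like grpc_health_probe does.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		log.Fatalf("health check: %v", err)
	}
	log.Printf("seed peer health: %s", resp.GetStatus())
}
```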
||||||
|
|
@ -14,7 +14,7 @@ prepare(){

|
||||||
ip=${IP:-$(hostname -i)}
|
ip=${IP:-$(hostname -i)}
|
||||||
|
|
||||||
sed "s,__IP__,$ip," template/dfget.template.yaml > config/dfget.yaml
|
sed "s,__IP__,$ip," template/dfget.template.yaml > config/dfget.yaml
|
||||||
sed "s,__IP__,$ip," template/cdn.template.yaml > config/cdn.yaml
|
sed "s,__IP__,$ip," template/seed-peer.template.yaml > config/seed-peer.yaml
|
||||||
sed "s,__IP__,$ip," template/scheduler.template.yaml > config/scheduler.yaml
|
sed "s,__IP__,$ip," template/scheduler.template.yaml > config/scheduler.yaml
|
||||||
sed "s,__IP__,$ip," template/manager.template.yaml > config/manager.yaml
|
sed "s,__IP__,$ip," template/manager.template.yaml > config/manager.yaml
|
||||||
}
|
}
|
||||||
|
|
@ -24,20 +24,19 @@ run_container(){
|
||||||
echo use container runtime: ${RUNTIME}
|
echo use container runtime: ${RUNTIME}
|
||||||
|
|
||||||
echo try to clean old containers
|
echo try to clean old containers
|
||||||
${RUNTIME} rm -f dragonfly-cdn dragonfly-scheduler dragonfly-dfdaemon
|
${RUNTIME} rm -f dragonfly-manager dragonfly-scheduler dragonfly-dfdaemon
|
||||||
|
|
||||||
printf "create dragonfly-manager "
|
printf "create dragonfly-manager "
|
||||||
${RUNTIME} run -d --name dragonfly-cdn --net=host \
|
${RUNTIME} run -d --name dragonfly-manager --net=host \
|
||||||
-v /tmp/log/dragonfly:/var/log/dragonfly \
|
-v /tmp/log/dragonfly:/var/log/dragonfly \
|
||||||
-v ${DIR}/config/manager.yaml:/etc/dragonfly/manager.yaml \
|
-v ${DIR}/config/manager.yaml:/etc/dragonfly/manager.yaml \
|
||||||
${REPO}/manager:${TAG}
|
${REPO}/manager:${TAG}
|
||||||
|
|
||||||
printf "create dragonfly-cdn "
|
printf "create dragonfly-seed-peer "
|
||||||
${RUNTIME} run -d --name dragonfly-cdn --net=host \
|
${RUNTIME} run -d --name dragonfly-seed-peer --net=host \
|
||||||
-v /tmp/log/dragonfly:/var/log/dragonfly \
|
-v /tmp/log/dragonfly:/var/log/dragonfly \
|
||||||
-v ${DIR}/config/cdn.yaml:/etc/dragonfly/cdn.yaml \
|
-v ${DIR}/config/seed-peer.yaml:/etc/dragonfly/dfget.yaml \
|
||||||
-v ${DIR}/config/nginx.conf:/etc/nginx/nginx.conf \
|
${REPO}/dfdaemon:${TAG}
|
||||||
${REPO}/cdn:${TAG}
|
|
||||||
|
|
||||||
printf "create dragonfly-scheduler "
|
printf "create dragonfly-scheduler "
|
||||||
${RUNTIME} run -d --name dragonfly-scheduler --net=host \
|
${RUNTIME} run -d --name dragonfly-scheduler --net=host \
|
||||||
|
|
|
||||||
|
|
@ -1,129 +0,0 @@
|
||||||
# This file is the template of cdn system configuration file.
|
|
||||||
# You can configure your cdn system by change the parameter according your requirement.
|
|
||||||
---
|
|
||||||
base:
|
|
||||||
# listenPort is the port cdn server listens on.
|
|
||||||
# default: 8003
|
|
||||||
listenPort: 8003
|
|
||||||
|
|
||||||
# DownloadPort is the port for download files from cdn.
|
|
||||||
# And you should start a file server firstly which listens on the download port.
|
|
||||||
# default: 8001
|
|
||||||
downloadPort: 8001
|
|
||||||
|
|
||||||
# SystemReservedBandwidth is the network bandwidth reserved for system software.
|
|
||||||
# default: 20 MB, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
|
|
||||||
systemReservedBandwidth: 20M
|
|
||||||
|
|
||||||
# MaxBandwidth is the network bandwidth that cdn can use.
|
|
||||||
# default: 1G, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
|
|
||||||
maxBandwidth: 1G
|
|
||||||
|
|
||||||
# AdvertiseIP is used to set the ip that we advertise to other peer in the p2p-network.
|
|
||||||
# By default, the first non-loop address is advertised.
|
|
||||||
advertiseIP: __IP__
|
|
||||||
|
|
||||||
# FailAccessInterval is the interval time after failed to access the URL.
|
|
||||||
# If a task failed to be downloaded from the source, it will not be retried in the time since the last failure.
|
|
||||||
# default: 3m
|
|
||||||
failAccessInterval: 3m
|
|
||||||
|
|
||||||
# GCInitialDelay is the delay time from the start to the first GC execution.
|
|
||||||
# default: 6s
|
|
||||||
gcInitialDelay: 6s
|
|
||||||
|
|
||||||
# GCMetaInterval is the interval time to execute GC meta.
|
|
||||||
# default: 2m0s
|
|
||||||
gcMetaInterval: 2m
|
|
||||||
|
|
||||||
# TaskExpireTime when a task is not accessed within the taskExpireTime,
|
|
||||||
# and it will be treated to be expired.
|
|
||||||
# default: 3m0s
|
|
||||||
taskExpireTime: 3m
|
|
||||||
|
|
||||||
# storageMode is the Mode of storage policy, [disk/hybrid]
|
|
||||||
storageMode: disk
|
|
||||||
|
|
||||||
|
|
||||||
# logDir is the log storage directory
|
|
||||||
# in linux, default value is /var/log/dragonfly
|
|
||||||
# in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
|
|
||||||
logDir: ""
|
|
||||||
|
|
||||||
# manager configuration
|
|
||||||
manager:
|
|
||||||
addr: "__IP__:65003"
|
|
||||||
cdnClusterID: "1"
|
|
||||||
keepAlive:
|
|
||||||
interval: 5s
|
|
||||||
|
|
||||||
# host configuration
|
|
||||||
host:
|
|
||||||
location:
|
|
||||||
idc:
|
|
||||||
|
|
||||||
# enable prometheus metrics
|
|
||||||
# metrics:
|
|
||||||
# # metrics service address
|
|
||||||
# addr: ":8000"
|
|
||||||
|
|
||||||
|
|
||||||
plugins:
|
|
||||||
storageDriver:
|
|
||||||
- name: disk
|
|
||||||
enable: true
|
|
||||||
config:
|
|
||||||
baseDir: /tmp/cdn
|
|
||||||
- name: memory
|
|
||||||
enable: false
|
|
||||||
config:
|
|
||||||
baseDir: /dev/shm/dragonfly
|
|
||||||
|
|
||||||
storagemanager:
|
|
||||||
- name: disk
|
|
||||||
enable: true
|
|
||||||
config:
|
|
||||||
gcInitialDelay: 0s
|
|
||||||
gcInterval: 15s
|
|
||||||
driverConfigs:
|
|
||||||
disk:
|
|
||||||
gcConfig:
|
|
||||||
youngGCThreshold: 100.0GB
|
|
||||||
fullGCThreshold: 5.0GB
|
|
||||||
cleanRatio: 1
|
|
||||||
intervalThreshold: 2h0m0s
|
|
||||||
- name: hybrid
|
|
||||||
enable: false
|
|
||||||
config:
|
|
||||||
gcInitialDelay: 0s
|
|
||||||
gcInterval: 15s
|
|
||||||
driverConfigs:
|
|
||||||
disk:
|
|
||||||
gcConfig:
|
|
||||||
youngGCThreshold: 100.0GB
|
|
||||||
fullGCThreshold: 5.0GB
|
|
||||||
cleanRatio: 1
|
|
||||||
intervalThreshold: 2h0m0s
|
|
||||||
memory:
|
|
||||||
gcConfig:
|
|
||||||
youngGCThreshold: 100.0GB
|
|
||||||
fullGCThreshold: 5.0GB
|
|
||||||
cleanRatio: 3
|
|
||||||
intervalThreshold: 2h0m0s
|
|
||||||
|
|
||||||
# console shows log on console
|
|
||||||
console: false
|
|
||||||
|
|
||||||
# whether to enable debug level logger and enable pprof
|
|
||||||
verbose: true
|
|
||||||
|
|
||||||
# listen port for pprof, only valid when the verbose option is true
|
|
||||||
# default is -1. If it is 0, pprof will use a random port.
|
|
||||||
pprof-port: -1
|
|
||||||
|
|
||||||
# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
|
|
||||||
jaeger: ""
|
|
||||||
|
|
||||||
# service name used in tracer
|
|
||||||
# default: dragonfly-cdn
|
|
||||||
service-name: dragonfly-cdn
|
|
||||||
|
|
@ -26,7 +26,7 @@ scheduler:
|
||||||
# the dragonfly working directory plugins
|
# the dragonfly working directory plugins
|
||||||
algorithm: default
|
algorithm: default
|
||||||
# backSourceCount is the number of backsource clients
|
# backSourceCount is the number of backsource clients
|
||||||
# when the CDN is unavailable
|
# when the seed peer is unavailable
|
||||||
backSourceCount: 3
|
backSourceCount: 3
|
||||||
# retry scheduling back-to-source limit times
|
# retry scheduling back-to-source limit times
|
||||||
retryBackSourceLimit: 5
|
retryBackSourceLimit: 5
|
||||||
|
|
@ -74,13 +74,6 @@ manager:
|
||||||
# interval
|
# interval
|
||||||
interval: 5s
|
interval: 5s
|
||||||
|
|
||||||
# cdn configuration
|
|
||||||
cdn:
|
|
||||||
# scheduler enable cdn as P2P peer,
|
|
||||||
# if the value is false, P2P network will not be back-to-source through
|
|
||||||
# cdn but by dfdaemon and preheat feature does not work
|
|
||||||
enable: true
|
|
||||||
|
|
||||||
# machinery async job configuration,
|
# machinery async job configuration,
|
||||||
# see https://github.com/RichardKnop/machinery
|
# see https://github.com/RichardKnop/machinery
|
||||||
job:
|
job:
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,180 @@
|
||||||
|
# daemon alive time; when set to 0s, the daemon will not exit automatically
|
||||||
|
# it is useful for long-running daemons
|
||||||
|
aliveTime: 0s
|
||||||
|
|
||||||
|
# daemon gc task running interval
|
||||||
|
gcInterval: 1m0s
|
||||||
|
|
||||||
|
# daemon work directory, daemon will change current working directory to this
|
||||||
|
# in linux, default value is /usr/local/dragonfly
|
||||||
|
# in macos(just for testing), default value is /Users/$USER/.dragonfly
|
||||||
|
workHome: ""
|
||||||
|
|
||||||
|
# cacheDir is dynconfig cache storage directory
|
||||||
|
# in linux, default value is /var/cache/dragonfly
|
||||||
|
# in macos(just for testing), default value is /Users/$USER/.dragonfly/cache
|
||||||
|
cacheDir: ""
|
||||||
|
|
||||||
|
# logDir is the log storage directory
|
||||||
|
# in linux, default value is /var/log/dragonfly
|
||||||
|
# in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
|
||||||
|
logDir: ""
|
||||||
|
|
||||||
|
# dataDir is the download data storage directory
|
||||||
|
# in linux, default value is /var/lib/dragonfly
|
||||||
|
# in macos(just for testing), default value is /Users/$USER/.dragonfly/data
|
||||||
|
dataDir: ""
|
||||||
|
|
||||||
|
# whether to keep peer task data when the daemon exits
|
||||||
|
# it is useful when upgrading the daemon service, since all local cache will be preserved
|
||||||
|
# default is false
|
||||||
|
keepStorage: true
|
||||||
|
|
||||||
|
# console shows log on console
|
||||||
|
console: false
|
||||||
|
|
||||||
|
# whether to enable debug level logger and enable pprof
|
||||||
|
verbose: true
|
||||||
|
|
||||||
|
# listen port for pprof, only valid when the verbose option is true
|
||||||
|
# default is -1. If it is 0, pprof will use a random port.
|
||||||
|
pprof-port: -1
|
||||||
|
|
||||||
|
# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
|
||||||
|
jaeger: ""
|
||||||
|
|
||||||
|
# all addresses of all schedulers
|
||||||
|
# the schedulers of all daemons should be the same within one region or zone
|
||||||
|
# the daemon will send tasks to a fixed scheduler by hashing the task url and metadata
|
||||||
|
# caution: only tcp is supported
|
||||||
|
scheduler:
|
||||||
|
manager:
|
||||||
|
# get scheduler list dynamically from manager
|
||||||
|
enable: true
|
||||||
|
# manager service addresses
|
||||||
|
netAddrs:
|
||||||
|
- type: tcp
|
||||||
|
addr: __IP__:65003
|
||||||
|
# scheduler list refresh interval
|
||||||
|
refreshInterval: 10s
|
||||||
|
seedPeer:
|
||||||
|
enable: true
|
||||||
|
type: "super"
|
||||||
|
clusterID: 1
|
||||||
|
# schedule timeout
|
||||||
|
scheduleTimeout: 30s
|
||||||
|
# when true, the daemon backs to the source only when the scheduler instructs it to
|
||||||
|
disableAutoBackSource: false
|
||||||
|
# the example below is a standalone address
|
||||||
|
netAddrs:
|
||||||
|
- type: tcp
|
||||||
|
addr: __IP__:8002
|
||||||
|
|
||||||
|
# current host info used for scheduler
|
||||||
|
host:
|
||||||
|
# tcp service listen address
|
||||||
|
# port should be set by other options
|
||||||
|
listenIP: 0.0.0.0
|
||||||
|
# access ip for other peers
|
||||||
|
# when the local ip differs from the access ip, advertiseIP should be set
|
||||||
|
advertiseIP: __IP__
|
||||||
|
# geographical location, separated by "|" characters
|
||||||
|
location: ""
|
||||||
|
# idc where the daemon is deployed
|
||||||
|
idc: ""
|
||||||
|
# security domain of the daemon; different security domains are network-isolated from each other
|
||||||
|
securityDomain: ""
|
||||||
|
# network topology, separated by "|" characters
|
||||||
|
netTopology: ""
|
||||||
|
# daemon hostname
|
||||||
|
# hostname: ""
|
||||||
|
|
||||||
|
# download service option
|
||||||
|
download:
|
||||||
|
# calculate digest when transferring files; set to false to save memory
|
||||||
|
calculateDigest: true
|
||||||
|
# total download limit per second
|
||||||
|
totalRateLimit: 2048Mi
|
||||||
|
# per peer task download limit per second
|
||||||
|
perPeerRateLimit: 1024Mi
|
||||||
|
# download piece timeout
|
||||||
|
pieceDownloadTimeout: 30s
|
||||||
|
# golang transport option
|
||||||
|
transportOption:
|
||||||
|
# dial timeout
|
||||||
|
dialTimeout: 2s
|
||||||
|
# keep alive
|
||||||
|
keepAlive: 30s
|
||||||
|
# same with http.Transport.MaxIdleConns
|
||||||
|
maxIdleConns: 100
|
||||||
|
# same with http.Transport.IdleConnTimeout
|
||||||
|
idleConnTimeout: 90s
|
||||||
|
# same with http.Transport.ResponseHeaderTimeout
|
||||||
|
responseHeaderTimeout: 2s
|
||||||
|
# same with http.Transport.TLSHandshakeTimeout
|
||||||
|
tlsHandshakeTimeout: 1s
|
||||||
|
# same with http.Transport.ExpectContinueTimeout
|
||||||
|
expectContinueTimeout: 2s
|
||||||
|
# download grpc option
|
||||||
|
downloadGRPC:
|
||||||
|
# security option
|
||||||
|
security:
|
||||||
|
insecure: true
|
||||||
|
# download service listen address
|
||||||
|
# currently, only unix domain sockets are supported
|
||||||
|
unixListen:
|
||||||
|
# in linux, default value is /var/run/dfdaemon.sock
|
||||||
|
# in macos(just for testing), default value is /tmp/dfdaemon.sock
|
||||||
|
socket: /var/run/dfdaemon.sock
|
||||||
|
# peer grpc option
|
||||||
|
# the peer grpc service sends piece info to other peers
|
||||||
|
peerGRPC:
|
||||||
|
security:
|
||||||
|
insecure: true
|
||||||
|
tcpListen:
|
||||||
|
# listen address
|
||||||
|
listen: 0.0.0.0
|
||||||
|
# listen port the daemon will try to listen on
|
||||||
|
# when this port is not available, the daemon will try the next port
|
||||||
|
port: 65100
|
||||||
|
# to limit the upper port, use the format below
|
||||||
|
# port:
|
||||||
|
# start: 65000
|
||||||
|
# end: 65009
|
||||||
|
|
||||||
|
# upload service option
|
||||||
|
upload:
|
||||||
|
# upload limit per second
|
||||||
|
rateLimit: 2048Mi
|
||||||
|
security:
|
||||||
|
insecure: true
|
||||||
|
tcpListen:
|
||||||
|
# listen address
|
||||||
|
listen: 0.0.0.0
|
||||||
|
# listen port the daemon will try to listen on
|
||||||
|
# when this port is not available, the daemon will try the next port
|
||||||
|
port: 65102
|
||||||
|
# to limit the upper port, use the format below
|
||||||
|
# port:
|
||||||
|
# start: 65020
|
||||||
|
# end: 65029
|
||||||
|
|
||||||
|
# peer task storage option
|
||||||
|
storage:
|
||||||
|
# task data expire time
|
||||||
|
# when task data has not been accessed within this time, the task will be garbage collected.
|
||||||
|
taskExpireTime: 6h
|
||||||
|
# storage strategy when process task data
|
||||||
|
# io.d7y.storage.v2.simple : download file to data directory first, then copy to output path, this is default action
|
||||||
|
# the downloaded file in the data directory will be the peer data for uploading to other peers
|
||||||
|
# io.d7y.storage.v2.advance: download file directly to output path with postfix, hard link to final output,
|
||||||
|
# avoids the copy to the output path and is faster than the simple strategy, but:
|
||||||
|
# the output file with postfix will be the peer data for uploading to other peers
|
||||||
|
# when the user deletes or changes this file, the peer data will be corrupted
|
||||||
|
# default is io.d7y.storage.v2.advance
|
||||||
|
strategy: io.d7y.storage.v2.advance
|
||||||
|
# disk usage percent gc threshold; when disk usage exceeds it, the oldest tasks will be reclaimed.
|
||||||
|
# e.g. diskGCThresholdPercent=90: when disk usage is above 90%, start to gc the oldest tasks
|
||||||
|
diskGCThresholdPercent: 90
|
||||||
|
# set to true to reuse the underlying storage for the same task id
|
||||||
|
multiplex: true
|
||||||
|
|
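The new seed-peer.yaml template above is just a dfdaemon configuration with the scheduler.manager.seedPeer section enabled; announceSeedPeer reads SeedPeer.Type and SeedPeer.ClusterID from it. The sketch below shows one way these YAML keys could map onto Go config types. The exact struct layout in dfdaemon is an assumption; only the YAML keys and their use in announceSeedPeer come from this patch.

```go
package main

// SeedPeerOption mirrors the seedPeer block of the template above.
type SeedPeerOption struct {
	Enable    bool   `yaml:"enable"`
	Type      string `yaml:"type"`      // "super", "strong" or "weak"
	ClusterID uint   `yaml:"clusterID"` // manager-side seed peer cluster id
}

// ManagerOption mirrors the scheduler.manager block of the template above.
type ManagerOption struct {
	Enable          bool           `yaml:"enable"`
	RefreshInterval string         `yaml:"refreshInterval"`
	SeedPeer        SeedPeerOption `yaml:"seedPeer"`
}
```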
@ -1 +1 @@
|
||||||
Subproject commit bff751a963c4c278c35dfb240eb7cb0ca52eef38
|
Subproject commit 34cf932be50b23ccb4f0f6cb501598dc47a06d7f
|
||||||
|
|
@ -20,7 +20,6 @@ package job
|
||||||
const (
|
const (
|
||||||
GlobalQueue = Queue("global")
|
GlobalQueue = Queue("global")
|
||||||
SchedulersQueue = Queue("schedulers")
|
SchedulersQueue = Queue("schedulers")
|
||||||
CDNsQueue = Queue("cdns")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Job Name
|
// Job Name
|
||||||
|
|
|
||||||
|
|
@ -36,18 +36,6 @@ func GetSchedulerQueue(clusterID uint, hostname string) (Queue, error) {
|
||||||
return Queue(fmt.Sprintf("scheduler_%d_%s", clusterID, hostname)), nil
|
return Queue(fmt.Sprintf("scheduler_%d_%s", clusterID, hostname)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetCDNQueue(clusterID uint, hostname string) (Queue, error) {
|
|
||||||
if clusterID == 0 {
|
|
||||||
return Queue(""), errors.New("empty cluster id config is not specified")
|
|
||||||
}
|
|
||||||
|
|
||||||
if hostname == "" {
|
|
||||||
return Queue(""), errors.New("empty hostname config is not specified")
|
|
||||||
}
|
|
||||||
|
|
||||||
return Queue(fmt.Sprintf("cdn_%d_%s", clusterID, hostname)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q Queue) String() string {
|
func (q Queue) String() string {
|
||||||
return string(q)
|
return string(q)
|
||||||
}
|
}
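With the CDN queue removed, GetSchedulerQueue is the remaining per-instance queue helper. A minimal usage sketch follows; the import path of the job package is assumed from the repository layout rather than stated in this diff.

```go
package main

import (
	"fmt"
	"log"

	"d7y.io/dragonfly/v2/internal/job" // assumed import path
)

func main() {
	// Queue name is built as scheduler_<clusterID>_<hostname>.
	queue, err := job.GetSchedulerQueue(1, "scheduler-0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(queue.String()) // scheduler_1_scheduler-0
}
```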
|
||||||
|
|
|
||||||
|
|
@ -65,47 +65,3 @@ func TestJobGetSchedulerQueue(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestJobGetCDNQueue(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
clusterID uint
|
|
||||||
hostname string
|
|
||||||
expect func(t *testing.T, result Queue, err error)
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "get cdn queue",
|
|
||||||
clusterID: 1,
|
|
||||||
hostname: "foo",
|
|
||||||
expect: func(t *testing.T, result Queue, err error) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
assert.Equal(Queue("cdn_1_foo"), result)
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "get cdn queue with empty hostname",
|
|
||||||
clusterID: 1,
|
|
||||||
hostname: "",
|
|
||||||
expect: func(t *testing.T, result Queue, err error) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
assert.EqualError(err, "empty hostname config is not specified")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "get scheduler queue with empty clusterID",
|
|
||||||
clusterID: 0,
|
|
||||||
hostname: "foo",
|
|
||||||
expect: func(t *testing.T, result Queue, err error) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
assert.EqualError(err, "empty cluster id config is not specified")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
queue, err := GetCDNQueue(tc.clusterID, tc.hostname)
|
|
||||||
tc.expect(t, queue, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -30,9 +30,6 @@ const (
|
||||||
// Seed Peer prefix of cache key.
|
// Seed Peer prefix of cache key.
|
||||||
SeedPeerNamespace = "seed-peer"
|
SeedPeerNamespace = "seed-peer"
|
||||||
|
|
||||||
// CDN prefix of cache key.
|
|
||||||
CDNNamespace = "cdn"
|
|
||||||
|
|
||||||
// Scheduler prefix of cache key.
|
// Scheduler prefix of cache key.
|
||||||
SchedulerNamespace = "scheduler"
|
SchedulerNamespace = "scheduler"
|
||||||
|
|
||||||
|
|
@ -79,11 +76,6 @@ func MakeSeedPeerCacheKey(hostname string, clusterID uint) string {
|
||||||
return MakeCacheKey(SeedPeerNamespace, fmt.Sprintf("%s-%d", hostname, clusterID))
|
return MakeCacheKey(SeedPeerNamespace, fmt.Sprintf("%s-%d", hostname, clusterID))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: Use MakeSeedPeerCacheKey instead.
|
|
||||||
func MakeCDNCacheKey(hostname string, clusterID uint) string {
|
|
||||||
return MakeCacheKey(CDNNamespace, fmt.Sprintf("%s-%d", hostname, clusterID))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make cache key for scheduler
|
// Make cache key for scheduler
|
||||||
func MakeSchedulerCacheKey(hostname string, clusterID uint) string {
|
func MakeSchedulerCacheKey(hostname string, clusterID uint) string {
|
||||||
return MakeCacheKey(SchedulerNamespace, fmt.Sprintf("%s-%d", hostname, clusterID))
|
return MakeCacheKey(SchedulerNamespace, fmt.Sprintf("%s-%d", hostname, clusterID))
|
||||||
|
|
|
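The deprecated MakeCDNCacheKey helper is dropped here, leaving MakeSeedPeerCacheKey and MakeSchedulerCacheKey. The example below shows how the seed-peer key is composed; MakeCacheKey itself is not shown in this diff, so the stand-in version and its output format are assumptions made only to keep the example self-contained.

```go
package main

import "fmt"

// Stand-in for the manager's MakeCacheKey helper (layout assumed).
func MakeCacheKey(namespace, id string) string {
	return fmt.Sprintf("manager:%s:%s", namespace, id)
}

// Same shape as the helper in the diff: namespace "seed-peer", key "<hostname>-<clusterID>".
func MakeSeedPeerCacheKey(hostname string, clusterID uint) string {
	return MakeCacheKey("seed-peer", fmt.Sprintf("%s-%d", hostname, clusterID))
}

func main() {
	// e.g. manager:seed-peer:seed-peer-0-1 under the assumed layout
	fmt.Println(MakeSeedPeerCacheKey("seed-peer-0", 1))
}
```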
||||||
|
|
@ -1 +1 @@
|
||||||
Subproject commit 9507a025f24729443c7339906b624a75cdafc143
|
Subproject commit bb9e98344779d1ca6f5ae989c48f9a25cb5c10e8
|
||||||
|
|
@ -98,8 +98,8 @@ func formatDSN(cfg *config.MysqlConfig) (string, error) {
|
||||||
func migrate(db *gorm.DB) error {
|
func migrate(db *gorm.DB) error {
|
||||||
return db.Set("gorm:table_options", "DEFAULT CHARSET=utf8mb4 ROW_FORMAT=Dynamic").AutoMigrate(
|
return db.Set("gorm:table_options", "DEFAULT CHARSET=utf8mb4 ROW_FORMAT=Dynamic").AutoMigrate(
|
||||||
&model.Job{},
|
&model.Job{},
|
||||||
&model.CDNCluster{},
|
&model.SeedPeerCluster{},
|
||||||
&model.CDN{},
|
&model.SeedPeer{},
|
||||||
&model.SchedulerCluster{},
|
&model.SchedulerCluster{},
|
||||||
&model.Scheduler{},
|
&model.Scheduler{},
|
||||||
&model.SecurityRule{},
|
&model.SecurityRule{},
|
||||||
|
|
@ -112,25 +112,6 @@ func migrate(db *gorm.DB) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func seed(db *gorm.DB) error {
|
func seed(db *gorm.DB) error {
|
||||||
var cdnClusterCount int64
|
|
||||||
if err := db.Model(model.CDNCluster{}).Count(&cdnClusterCount).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if cdnClusterCount <= 0 {
|
|
||||||
if err := db.Create(&model.CDNCluster{
|
|
||||||
Model: model.Model{
|
|
||||||
ID: uint(1),
|
|
||||||
},
|
|
||||||
Name: "cdn-cluster-1",
|
|
||||||
Config: map[string]interface{}{
|
|
||||||
"load_limit": schedulerconfig.DefaultCDNLoadLimit,
|
|
||||||
},
|
|
||||||
IsDefault: true,
|
|
||||||
}).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var schedulerClusterCount int64
|
var schedulerClusterCount int64
|
||||||
if err := db.Model(model.SchedulerCluster{}).Count(&schedulerClusterCount).Error; err != nil {
|
if err := db.Model(model.SchedulerCluster{}).Count(&schedulerClusterCount).Error; err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
@ -155,9 +136,26 @@ func seed(db *gorm.DB) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if schedulerClusterCount == 0 && cdnClusterCount == 0 {
|
var seedPeerClusterCount int64
|
||||||
cdnCluster := model.CDNCluster{}
|
if err := db.Model(model.SeedPeerCluster{}).Count(&seedPeerClusterCount).Error; err != nil {
|
||||||
if err := db.First(&cdnCluster).Error; err != nil {
|
return err
|
||||||
|
}
|
||||||
|
if seedPeerClusterCount <= 0 {
|
||||||
|
if err := db.Create(&model.SeedPeerCluster{
|
||||||
|
Model: model.Model{
|
||||||
|
ID: uint(1),
|
||||||
|
},
|
||||||
|
Name: "seed-peer-cluster-1",
|
||||||
|
Config: map[string]interface{}{
|
||||||
|
"load_limit": schedulerconfig.DefaultSeedPeerLoadLimit,
|
||||||
|
},
|
||||||
|
IsDefault: true,
|
||||||
|
}).Error; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
seedPeerCluster := model.SeedPeerCluster{}
|
||||||
|
if err := db.First(&seedPeerCluster).Error; err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -166,7 +164,7 @@ func seed(db *gorm.DB) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := db.Model(&cdnCluster).Association("SchedulerClusters").Append(&schedulerCluster); err != nil {
|
if err := db.Model(&seedPeerCluster).Association("SchedulerClusters").Append(&schedulerCluster); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
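The reworked seed() now creates a default SeedPeerCluster and appends the default scheduler cluster to it through the "SchedulerClusters" association. The helper below is an illustrative way to verify that link after seeding; it is not part of the patch, and the model import path is assumed, while Association and Find are standard GORM v2 APIs.

```go
package main

import (
	"gorm.io/gorm"

	"d7y.io/dragonfly/v2/manager/model" // assumed import path
)

// verifyDefaultSeedPeerCluster checks that the seeded default seed peer
// cluster has at least one associated scheduler cluster, using the same
// association name as the seed() code above.
func verifyDefaultSeedPeerCluster(db *gorm.DB) error {
	var seedPeerCluster model.SeedPeerCluster
	if err := db.First(&seedPeerCluster).Error; err != nil {
		return err
	}

	var schedulerClusters []model.SchedulerCluster
	return db.Model(&seedPeerCluster).Association("SchedulerClusters").Find(&schedulerClusters)
}
```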
||||||
|
|
@ -277,57 +277,3 @@ func (h *Handlers) DeleteSeedPeerClusterToApplication(ctx *gin.Context) {
|
||||||
|
|
||||||
ctx.Status(http.StatusOK)
|
ctx.Status(http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
// @Summary Add CDN to Application
|
|
||||||
// @Description Add CDN to Application
|
|
||||||
// @Tags Application
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Param cdn_cluster_id path string true "cdn cluster id"
|
|
||||||
// @Success 200
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /applications/{id}/cdn-clusters/{cdn_cluster_id} [put]
|
|
||||||
func (h *Handlers) AddCDNClusterToApplication(ctx *gin.Context) {
|
|
||||||
var params types.AddCDNClusterToApplicationParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.AddCDNClusterToApplication(ctx.Request.Context(), params.ID, params.CDNClusterID); err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.Status(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Delete CDN to Application
|
|
||||||
// @Description Delete CDN to Application
|
|
||||||
// @Tags Application
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Param cdn_cluster_id path string true "cdn cluster id"
|
|
||||||
// @Success 200
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /applications/{id}/cdn-clusters/{cdn_cluster_id} [delete]
|
|
||||||
func (h *Handlers) DeleteCDNClusterToApplication(ctx *gin.Context) {
|
|
||||||
var params types.DeleteCDNClusterToApplicationParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.DeleteCDNClusterToApplication(ctx.Request.Context(), params.ID, params.CDNClusterID); err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.Status(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -1,171 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2020 The Dragonfly Authors
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
|
|
||||||
// nolint
|
|
||||||
_ "d7y.io/dragonfly/v2/manager/model"
|
|
||||||
"d7y.io/dragonfly/v2/manager/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
// @Summary Create CDN
|
|
||||||
// @Description create by json config
|
|
||||||
// @Tags CDN
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param CDN body types.CreateCDNRequest true "CDN"
|
|
||||||
// @Success 200 {object} model.CDN
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdns [post]
|
|
||||||
func (h *Handlers) CreateCDN(ctx *gin.Context) {
|
|
||||||
var json types.CreateCDNRequest
|
|
||||||
if err := ctx.ShouldBindJSON(&json); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
cdn, err := h.service.CreateCDN(ctx.Request.Context(), json)
|
|
||||||
if err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.JSON(http.StatusOK, cdn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Destroy CDN
|
|
||||||
// @Description Destroy by id
|
|
||||||
// @Tags CDN
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Success 200
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdns/{id} [delete]
|
|
||||||
func (h *Handlers) DestroyCDN(ctx *gin.Context) {
|
|
||||||
var params types.CDNParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.DestroyCDN(ctx.Request.Context(), params.ID); err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.Status(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Update CDN
|
|
||||||
// @Description Update by json config
|
|
||||||
// @Tags CDN
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Param CDN body types.UpdateCDNRequest true "CDN"
|
|
||||||
// @Success 200 {object} model.CDN
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdns/{id} [patch]
|
|
||||||
func (h *Handlers) UpdateCDN(ctx *gin.Context) {
|
|
||||||
var params types.CDNParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var json types.UpdateCDNRequest
|
|
||||||
if err := ctx.ShouldBindJSON(&json); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
cdn, err := h.service.UpdateCDN(ctx.Request.Context(), params.ID, json)
|
|
||||||
if err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.JSON(http.StatusOK, cdn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Get CDN
|
|
||||||
// @Description Get CDN by id
|
|
||||||
// @Tags CDN
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Success 200 {object} model.CDN
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdns/{id} [get]
|
|
||||||
func (h *Handlers) GetCDN(ctx *gin.Context) {
|
|
||||||
var params types.CDNParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
cdn, err := h.service.GetCDN(ctx.Request.Context(), params.ID)
|
|
||||||
if err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.JSON(http.StatusOK, cdn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Get CDNs
|
|
||||||
// @Description Get CDNs
|
|
||||||
// @Tags CDN
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param page query int true "current page" default(0)
|
|
||||||
// @Param per_page query int true "return max item count, default 10, max 50" default(10) minimum(2) maximum(50)
|
|
||||||
// @Success 200 {object} []model.CDN
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdns [get]
|
|
||||||
func (h *Handlers) GetCDNs(ctx *gin.Context) {
|
|
||||||
var query types.GetCDNsQuery
|
|
||||||
if err := ctx.ShouldBindQuery(&query); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
h.setPaginationDefault(&query.Page, &query.PerPage)
|
|
||||||
cdns, count, err := h.service.GetCDNs(ctx.Request.Context(), query)
|
|
||||||
if err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
h.setPaginationLinkHeader(ctx, query.Page, query.PerPage, int(count))
|
|
||||||
ctx.JSON(http.StatusOK, cdns)
|
|
||||||
}
|
|
||||||
|
|
@ -1,225 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2020 The Dragonfly Authors
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
|
|
||||||
// nolint
|
|
||||||
_ "d7y.io/dragonfly/v2/manager/model"
|
|
||||||
"d7y.io/dragonfly/v2/manager/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
// @Summary Create CDNCluster
|
|
||||||
// @Description create by json config
|
|
||||||
// @Tags CDNCluster
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param CDNCluster body types.CreateCDNClusterRequest true "DNCluster"
|
|
||||||
// @Success 200 {object} model.CDNCluster
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdn-clusters [post]
|
|
||||||
func (h *Handlers) CreateCDNCluster(ctx *gin.Context) {
|
|
||||||
var json types.CreateCDNClusterRequest
|
|
||||||
if err := ctx.ShouldBindJSON(&json); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
cdnCluster, err := h.service.CreateCDNCluster(ctx.Request.Context(), json)
|
|
||||||
if err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.JSON(http.StatusOK, cdnCluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Destroy CDNCluster
|
|
||||||
// @Description Destroy by id
|
|
||||||
// @Tags CDNCluster
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Success 200
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdn-clusters/{id} [delete]
|
|
||||||
func (h *Handlers) DestroyCDNCluster(ctx *gin.Context) {
|
|
||||||
var params types.CDNClusterParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.DestroyCDNCluster(ctx.Request.Context(), params.ID); err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.Status(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Update CDNCluster
|
|
||||||
// @Description Update by json config
|
|
||||||
// @Tags CDNCluster
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Param CDNCluster body types.UpdateCDNClusterRequest true "CDNCluster"
|
|
||||||
// @Success 200 {object} model.CDNCluster
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdn-clusters/{id} [patch]
|
|
||||||
func (h *Handlers) UpdateCDNCluster(ctx *gin.Context) {
|
|
||||||
var params types.CDNClusterParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var json types.UpdateCDNClusterRequest
|
|
||||||
if err := ctx.ShouldBindJSON(&json); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
cdnCluster, err := h.service.UpdateCDNCluster(ctx.Request.Context(), params.ID, json)
|
|
||||||
if err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.JSON(http.StatusOK, cdnCluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Get CDNCluster
|
|
||||||
// @Description Get CDNCluster by id
|
|
||||||
// @Tags CDNCluster
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Success 200 {object} model.CDNCluster
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdn-clusters/{id} [get]
|
|
||||||
func (h *Handlers) GetCDNCluster(ctx *gin.Context) {
|
|
||||||
var params types.CDNClusterParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
cdnCluster, err := h.service.GetCDNCluster(ctx.Request.Context(), params.ID)
|
|
||||||
if err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.JSON(http.StatusOK, cdnCluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Get CDNClusters
|
|
||||||
// @Description Get CDNClusters
|
|
||||||
// @Tags CDNCluster
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param page query int true "current page" default(0)
|
|
||||||
// @Param per_page query int true "return max item count, default 10, max 50" default(10) minimum(2) maximum(50)
|
|
||||||
// @Success 200 {object} []model.CDNCluster
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdn-clusters [get]
|
|
||||||
func (h *Handlers) GetCDNClusters(ctx *gin.Context) {
|
|
||||||
var query types.GetCDNClustersQuery
|
|
||||||
if err := ctx.ShouldBindQuery(&query); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
h.setPaginationDefault(&query.Page, &query.PerPage)
|
|
||||||
cdns, count, err := h.service.GetCDNClusters(ctx.Request.Context(), query)
|
|
||||||
if err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
h.setPaginationLinkHeader(ctx, query.Page, query.PerPage, int(count))
|
|
||||||
ctx.JSON(http.StatusOK, cdns)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Add Instance to CDNCluster
|
|
||||||
// @Description Add CDN to CDNCluster
|
|
||||||
// @Tags CDNCluster
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Param cdn_id path string true "cdn id"
|
|
||||||
// @Success 200
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdn-clusters/{id}/cdns/{cdn_id} [put]
|
|
||||||
func (h *Handlers) AddCDNToCDNCluster(ctx *gin.Context) {
|
|
||||||
var params types.AddCDNToCDNClusterParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.AddCDNToCDNCluster(ctx.Request.Context(), params.ID, params.CDNID); err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.Status(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Add SchedulerCluster to CDNCluster
|
|
||||||
// @Description Add SchedulerCluster to CDNCluster
|
|
||||||
// @Tags CDNCluster
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Param scheduler_cluster_id path string true "scheduler cluster id"
|
|
||||||
// @Success 200
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /cdn-clusters/{id}/scheduler-clusters/{scheduler_cluster_id} [put]
|
|
||||||
func (h *Handlers) AddSchedulerClusterToCDNCluster(ctx *gin.Context) {
|
|
||||||
var params types.AddSchedulerClusterToCDNClusterParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.AddSchedulerClusterToCDNCluster(ctx.Request.Context(), params.ID, params.SchedulerClusterID); err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.Status(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
@ -226,34 +226,6 @@ func (h *Handlers) AddSeedPeerClusterToSecurityGroup(ctx *gin.Context) {
|
||||||
ctx.Status(http.StatusOK)
|
ctx.Status(http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
// @Summary Add CDN to SecurityGroup
|
|
||||||
// @Description Add CDN to SecurityGroup
|
|
||||||
// @Tags SecurityGroup
|
|
||||||
// @Accept json
|
|
||||||
// @Produce json
|
|
||||||
// @Param id path string true "id"
|
|
||||||
// @Param cdn_cluster_id path string true "cdn cluster id"
|
|
||||||
// @Success 200
|
|
||||||
// @Failure 400
|
|
||||||
// @Failure 404
|
|
||||||
// @Failure 500
|
|
||||||
// @Router /security-groups/{id}/cdn-clusters/{cdn_cluster_id} [put]
|
|
||||||
func (h *Handlers) AddCDNClusterToSecurityGroup(ctx *gin.Context) {
|
|
||||||
var params types.AddCDNClusterToSecurityGroupParams
|
|
||||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
|
||||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err := h.service.AddCDNClusterToSecurityGroup(ctx.Request.Context(), params.ID, params.CDNClusterID)
|
|
||||||
if err != nil {
|
|
||||||
ctx.Error(err) // nolint: errcheck
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.Status(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
// @Summary Add SecurityRule to SecurityGroup
|
// @Summary Add SecurityRule to SecurityGroup
|
||||||
// @Description Add SecurityRule to SecurityGroup
|
// @Description Add SecurityRule to SecurityGroup
|
||||||
// @Tags SecurityGroup
|
// @Tags SecurityGroup
|
||||||
|
|
|
||||||
|
|
@ -26,6 +26,5 @@ type Application struct {
|
||||||
UserID uint `gorm:"comment:user id" json:"user_id"`
|
UserID uint `gorm:"comment:user id" json:"user_id"`
|
||||||
User User `json:"user"`
|
User User `json:"user"`
|
||||||
SeedPeerClusters []SeedPeerCluster `json:"seed_peer_clusters"`
|
SeedPeerClusters []SeedPeerCluster `json:"seed_peer_clusters"`
|
||||||
CDNClusters []CDNCluster `json:"cdn_clusters"`
|
|
||||||
SchedulerClusters []SchedulerCluster `json:"scheduler_clusters"`
|
SchedulerClusters []SchedulerCluster `json:"scheduler_clusters"`
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,35 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2020 The Dragonfly Authors
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package model
|
|
||||||
|
|
||||||
const (
|
|
||||||
CDNStateActive = "active"
|
|
||||||
CDNStateInactive = "inactive"
|
|
||||||
)
|
|
||||||
|
|
||||||
type CDN struct {
|
|
||||||
Model
|
|
||||||
HostName string `gorm:"column:host_name;type:varchar(256);index:uk_cdn,unique;not null;comment:hostname" json:"host_name"`
|
|
||||||
IDC string `gorm:"column:idc;type:varchar(1024);comment:internet data center" json:"idc"`
|
|
||||||
Location string `gorm:"column:location;type:varchar(1024);comment:location" json:"location"`
|
|
||||||
IP string `gorm:"column:ip;type:varchar(256);not null;comment:ip address" json:"ip"`
|
|
||||||
Port int32 `gorm:"column:port;not null;comment:grpc service listening port" json:"port"`
|
|
||||||
DownloadPort int32 `gorm:"column:download_port;not null;comment:download service listening port" json:"download_port"`
|
|
||||||
State string `gorm:"column:state;type:varchar(256);default:'inactive';comment:service state" json:"state"`
|
|
||||||
CDNClusterID uint `gorm:"index:uk_cdn,unique;not null;comment:cdn cluster id"`
|
|
||||||
CDNCluster CDNCluster `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
@ -1,32 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2020 The Dragonfly Authors
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package model
|
|
||||||
|
|
||||||
type CDNCluster struct {
|
|
||||||
Model
|
|
||||||
Name string `gorm:"column:name;type:varchar(256);index:uk_cdn_cluster_name,unique;not null;comment:name" json:"name"`
|
|
||||||
BIO string `gorm:"column:bio;type:varchar(1024);comment:biography" json:"bio"`
|
|
||||||
Config JSONMap `gorm:"column:config;not null;comment:configuration" json:"config"`
|
|
||||||
SchedulerClusters []SchedulerCluster `gorm:"many2many:cdn_cluster_scheduler_cluster;" json:"scheduler_clusters"`
|
|
||||||
IsDefault bool `gorm:"column:is_default;not null;default:false;comment:default cdn cluster" json:"is_default"`
|
|
||||||
CDNs []CDN `json:"-"`
|
|
||||||
ApplicationID uint `gorm:"comment:application id" json:"application_id"`
|
|
||||||
Application Application `json:"-"`
|
|
||||||
SecurityGroupID uint `gorm:"comment:security group id" json:"security_group_id"`
|
|
||||||
SecurityGroup SecurityGroup `json:"-"`
|
|
||||||
Jobs []Job `gorm:"many2many:job_cdn_cluster;" json:"jobs"`
|
|
||||||
}
|
|
||||||
|
|
@@ -27,6 +27,5 @@ type Job struct {
 	UserID uint `gorm:"column:user_id;comment:user id" json:"user_id"`
 	User User `json:"-"`
 	SeedPeerClusters []SeedPeerCluster `gorm:"many2many:job_seed_peer_cluster;" json:"seed_peer_clusters"`
-	CDNClusters []CDNCluster `gorm:"many2many:job_cdn_cluster;" json:"cdn_clusters"`
 	SchedulerClusters []SchedulerCluster `gorm:"many2many:job_scheduler_cluster;" json:"scheduler_clusters"`
 }
@@ -25,7 +25,6 @@ type SchedulerCluster struct {
 	Scopes JSONMap `gorm:"column:scopes;comment:match scopes" json:"scopes"`
 	IsDefault bool `gorm:"column:is_default;not null;default:false;comment:default scheduler cluster" json:"is_default"`
 	SeedPeerClusters []SeedPeerCluster `gorm:"many2many:seed_peer_cluster_scheduler_cluster;" json:"seed_peer_clusters"`
-	CDNClusters []CDNCluster `gorm:"many2many:cdn_cluster_scheduler_cluster;" json:"cdn_clusters"`
 	Schedulers []Scheduler `json:"-"`
 	ApplicationID uint `gorm:"comment:application id" json:"application_id"`
 	Application Application `json:"-"`
@@ -22,6 +22,5 @@ type SecurityGroup struct {
 	BIO string `gorm:"column:bio;type:varchar(1024);comment:biography" json:"bio"`
 	SecurityRules []SecurityRule `gorm:"many2many:security_group_security_rule;" json:"security_rules"`
 	SeedPeerClusters []SeedPeerCluster `json:"-"`
-	CDNClusters []CDNCluster `json:"-"`
 	SchedulerClusters []SchedulerCluster `json:"-"`
 }
@@ -22,15 +22,16 @@ const (
 )
 
 const (
-	SeedPeerTypeSuperSeed = "SuperSeed"
-	SeedPeerTypeStrongSeed = "StrongSeed"
-	SeedPeerTypeWeakSeed = "WeakSeed"
+	SeedPeerTypeSuperSeed = "super"
+	SeedPeerTypeStrongSeed = "strong"
+	SeedPeerTypeWeakSeed = "weak"
 )
 
 type SeedPeer struct {
 	Model
 	HostName string `gorm:"column:host_name;type:varchar(256);index:uk_seed_peer,unique;not null;comment:hostname" json:"host_name"`
 	Type string `gorm:"column:type;type:varchar(256);comment:type" json:"type"`
+	IsCDN bool `gorm:"column:is_cdn;not null;default:false;comment:cdn seed peer" json:"is_cdn"`
 	IDC string `gorm:"column:idc;type:varchar(1024);comment:internet data center" json:"idc"`
 	NetTopology string `gorm:"column:net_topology;type:varchar(1024);comment:network topology" json:"net_topology"`
 	Location string `gorm:"column:location;type:varchar(1024);comment:location" json:"location"`
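For illustration only (this snippet is not part of the commit, and the helper name is hypothetical): a standalone sketch of how a caller could check a seed peer type string against the new lowercase values introduced above, where the old mixed-case values such as "SuperSeed" no longer match.

package main

import "fmt"

// Mirrors the lowercase seed peer type constants introduced above.
const (
	SeedPeerTypeSuperSeed  = "super"
	SeedPeerTypeStrongSeed = "strong"
	SeedPeerTypeWeakSeed   = "weak"
)

// isValidSeedPeerType reports whether t is one of the supported seed peer types.
func isValidSeedPeerType(t string) bool {
	switch t {
	case SeedPeerTypeSuperSeed, SeedPeerTypeStrongSeed, SeedPeerTypeWeakSeed:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(isValidSeedPeerType("super"))     // true
	fmt.Println(isValidSeedPeerType("SuperSeed")) // false: the old mixed-case value is no longer accepted
}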
@@ -156,8 +156,6 @@ func Init(cfg *config.Config, logDir string, service service.Service, enforcer *
 	cs.DELETE(":id/scheduler-clusters/:scheduler_cluster_id", h.DeleteSchedulerClusterToApplication)
 	cs.PUT(":id/seed-peer-clusters/:seed_peer_cluster_id", h.AddSeedPeerClusterToApplication)
 	cs.DELETE(":id/seed-peer-clusters/:seed_peer_cluster_id", h.DeleteSeedPeerClusterToApplication)
-	cs.PUT(":id/cdn-clusters/:cdn_cluster_id", h.AddCDNClusterToApplication)
-	cs.DELETE(":id/cdn-clusters/:cdn_cluster_id", h.DeleteCDNClusterToApplication)
 
 	// Seed Peer Cluster
 	spc := apiv1.Group("/seed-peer-clusters", jwt.MiddlewareFunc(), rbac)
@@ -177,24 +175,6 @@ func Init(cfg *config.Config, logDir string, service service.Service, enforcer *
 	sp.GET(":id", h.GetSeedPeer)
 	sp.GET("", h.GetSeedPeers)
 
-	// CDN Cluster
-	cc := apiv1.Group("/cdn-clusters", jwt.MiddlewareFunc(), rbac)
-	cc.POST("", h.CreateCDNCluster)
-	cc.DELETE(":id", h.DestroyCDNCluster)
-	cc.PATCH(":id", h.UpdateCDNCluster)
-	cc.GET(":id", h.GetCDNCluster)
-	cc.GET("", h.GetCDNClusters)
-	cc.PUT(":id/cdns/:cdn_id", h.AddCDNToCDNCluster)
-	cc.PUT(":id/scheduler-clusters/:scheduler_cluster_id", h.AddSchedulerClusterToCDNCluster)
-
-	// CDN
-	c := apiv1.Group("/cdns", jwt.MiddlewareFunc(), rbac)
-	c.POST("", h.CreateCDN)
-	c.DELETE(":id", h.DestroyCDN)
-	c.PATCH(":id", h.UpdateCDN)
-	c.GET(":id", h.GetCDN)
-	c.GET("", h.GetCDNs)
-
 	// Security Rule
 	sr := apiv1.Group("/security-rules", jwt.MiddlewareFunc(), rbac)
 	sr.POST("", h.CreateSecurityRule)
@@ -212,7 +192,6 @@ func Init(cfg *config.Config, logDir string, service service.Service, enforcer *
 	sg.GET("", h.GetSecurityGroups)
 	sg.PUT(":id/scheduler-clusters/:scheduler_cluster_id", h.AddSchedulerClusterToSecurityGroup)
 	sg.PUT(":id/seed-peer-clusters/:seed_peer_cluster_id", h.AddSeedPeerClusterToSecurityGroup)
-	sg.PUT(":id/cdn-clusters/:cdn_cluster_id", h.AddCDNClusterToSecurityGroup)
 	sg.PUT(":id/security-rules/:security_rule_id", h.AddSecurityRuleToSecurityGroup)
 	sg.DELETE(":id/security-rules/:security_rule_id", h.DestroySecurityRuleToSecurityGroup)
@@ -147,6 +147,7 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques
 	pbSeedPeer = manager.SeedPeer{
 		Id: uint64(seedPeer.ID),
 		Type: seedPeer.Type,
+		IsCdn: seedPeer.IsCDN,
 		HostName: seedPeer.HostName,
 		Idc: seedPeer.IDC,
 		NetTopology: seedPeer.NetTopology,
@@ -193,6 +194,7 @@ func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
 
 	if err := s.db.WithContext(ctx).Model(&seedPeer).Updates(model.SeedPeer{
 		Type: req.Type,
+		IsCDN: req.IsCdn,
 		IDC: req.Idc,
 		NetTopology: req.NetTopology,
 		Location: req.Location,
@@ -215,6 +217,7 @@ func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
 		Id: uint64(seedPeer.ID),
 		HostName: seedPeer.HostName,
 		Type: seedPeer.Type,
+		IsCdn: seedPeer.IsCDN,
 		Idc: seedPeer.IDC,
 		NetTopology: seedPeer.NetTopology,
 		Location: seedPeer.Location,
@@ -231,6 +234,7 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
 	seedPeer := model.SeedPeer{
 		HostName: req.HostName,
 		Type: req.Type,
+		IsCDN: req.IsCdn,
 		IDC: req.Idc,
 		NetTopology: req.NetTopology,
 		Location: req.Location,
@@ -248,6 +252,7 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
 		Id: uint64(seedPeer.ID),
 		HostName: seedPeer.HostName,
 		Type: seedPeer.Type,
+		IsCdn: seedPeer.IsCDN,
 		Idc: seedPeer.IDC,
 		NetTopology: seedPeer.NetTopology,
 		Location: seedPeer.Location,
@@ -259,157 +264,6 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
 	}, nil
 }
 
-// Deprecated: Use GetSeedPeer instead.
-func (s *Server) GetCDN(ctx context.Context, req *manager.GetCDNRequest) (*manager.CDN, error) {
-	var pbCDN manager.CDN
-	cacheKey := cache.MakeCDNCacheKey(req.HostName, uint(req.CdnClusterId))
-
-	// Cache hit.
-	if err := s.cache.Get(ctx, cacheKey, &pbCDN); err == nil {
-		logger.Infof("%s cache hit", cacheKey)
-		return &pbCDN, nil
-	}
-
-	// Cache miss.
-	logger.Infof("%s cache miss", cacheKey)
-	cdn := model.CDN{}
-	if err := s.db.WithContext(ctx).Preload("CDNCluster").Preload("CDNCluster.SchedulerClusters.Schedulers", &model.Scheduler{
-		State: model.SchedulerStateActive,
-	}).First(&cdn, &model.CDN{
-		HostName: req.HostName,
-		CDNClusterID: uint(req.CdnClusterId),
-	}).Error; err != nil {
-		return nil, status.Error(codes.Unknown, err.Error())
-	}
-
-	// Marshal config of cdn cluster.
-	config, err := cdn.CDNCluster.Config.MarshalJSON()
-	if err != nil {
-		return nil, status.Error(codes.DataLoss, err.Error())
-	}
-
-	// Construct schedulers.
-	var pbSchedulers []*manager.Scheduler
-	for _, schedulerCluster := range cdn.CDNCluster.SchedulerClusters {
-		for _, scheduler := range schedulerCluster.Schedulers {
-			pbSchedulers = append(pbSchedulers, &manager.Scheduler{
-				Id: uint64(scheduler.ID),
-				HostName: scheduler.HostName,
-				Idc: scheduler.IDC,
-				Location: scheduler.Location,
-				Ip: scheduler.IP,
-				Port: scheduler.Port,
-				State: scheduler.State,
-			})
-		}
-	}
-
-	// Construct cdn.
-	pbCDN = manager.CDN{
-		Id: uint64(cdn.ID),
-		HostName: cdn.HostName,
-		Idc: cdn.IDC,
-		Location: cdn.Location,
-		Ip: cdn.IP,
-		Port: cdn.Port,
-		DownloadPort: cdn.DownloadPort,
-		State: cdn.State,
-		CdnClusterId: uint64(cdn.CDNClusterID),
-		CdnCluster: &manager.CDNCluster{
-			Id: uint64(cdn.CDNCluster.ID),
-			Name: cdn.CDNCluster.Name,
-			Bio: cdn.CDNCluster.BIO,
-			Config: config,
-		},
-		Schedulers: pbSchedulers,
-	}
-
-	// Cache data.
-	if err := s.cache.Once(&cachev8.Item{
-		Ctx: ctx,
-		Key: cacheKey,
-		Value: &pbCDN,
-		TTL: s.cache.TTL,
-	}); err != nil {
-		logger.Warnf("storage cache failed: %v", err)
-	}
-
-	return &pbCDN, nil
-}
-
-// Deprecated: Use UpdateSeedPeer instead.
-func (s *Server) UpdateCDN(ctx context.Context, req *manager.UpdateCDNRequest) (*manager.CDN, error) {
-	cdn := model.CDN{}
-	if err := s.db.WithContext(ctx).First(&cdn, model.CDN{
-		HostName: req.HostName,
-		CDNClusterID: uint(req.CdnClusterId),
-	}).Error; err != nil {
-		if errors.Is(err, gorm.ErrRecordNotFound) {
-			return s.createCDN(ctx, req)
-		}
-		return nil, status.Error(codes.Unknown, err.Error())
-	}
-
-	if err := s.db.WithContext(ctx).Model(&cdn).Updates(model.CDN{
-		IDC: req.Idc,
-		Location: req.Location,
-		IP: req.Ip,
-		Port: req.Port,
-		DownloadPort: req.DownloadPort,
-		CDNClusterID: uint(req.CdnClusterId),
-	}).Error; err != nil {
-		return nil, status.Error(codes.Unknown, err.Error())
-	}
-
-	if err := s.cache.Delete(
-		ctx,
-		cache.MakeCDNCacheKey(cdn.HostName, cdn.CDNClusterID),
-	); err != nil {
-		logger.Warnf("%s refresh keepalive status failed in cdn cluster %d", cdn.HostName, cdn.CDNClusterID)
-	}
-
-	return &manager.CDN{
-		Id: uint64(cdn.ID),
-		HostName: cdn.HostName,
-		Idc: cdn.IDC,
-		Location: cdn.Location,
-		Ip: cdn.IP,
-		Port: cdn.Port,
-		DownloadPort: cdn.DownloadPort,
-		CdnClusterId: uint64(cdn.CDNClusterID),
-		State: cdn.State,
-	}, nil
-}
-
-// Deprecated: Use createSeedPeer instead.
-func (s *Server) createCDN(ctx context.Context, req *manager.UpdateCDNRequest) (*manager.CDN, error) {
-	cdn := model.CDN{
-		HostName: req.HostName,
-		IDC: req.Idc,
-		Location: req.Location,
-		IP: req.Ip,
-		Port: req.Port,
-		DownloadPort: req.DownloadPort,
-		CDNClusterID: uint(req.CdnClusterId),
-	}
-
-	if err := s.db.WithContext(ctx).Create(&cdn).Error; err != nil {
-		return nil, status.Error(codes.Unknown, err.Error())
-	}
-
-	return &manager.CDN{
-		Id: uint64(cdn.ID),
-		HostName: cdn.HostName,
-		Idc: cdn.IDC,
-		Location: cdn.Location,
-		Ip: cdn.IP,
-		Port: cdn.Port,
-		DownloadPort: cdn.DownloadPort,
-		CdnClusterId: uint64(cdn.CDNClusterID),
-		State: cdn.State,
-	}, nil
-}
-
 // Get Scheduler and Scheduler cluster configuration.
 func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
 	var pbScheduler manager.Scheduler
@@ -424,9 +278,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
 	// Cache miss.
 	logger.Infof("%s cache miss", cacheKey)
 	scheduler := model.Scheduler{}
-	if err := s.db.WithContext(ctx).Preload("SchedulerCluster").Preload("SchedulerCluster.CDNClusters.CDNs", &model.CDN{
-		State: model.CDNStateActive,
-	}).Preload("SchedulerCluster.SeedPeerClusters.SeedPeers", &model.CDN{
+	if err := s.db.WithContext(ctx).Preload("SchedulerCluster").Preload("SchedulerCluster.SeedPeerClusters.SeedPeers", &model.SeedPeer{
 		State: model.SeedPeerStateActive,
 	}).First(&scheduler, &model.Scheduler{
 		HostName: req.HostName,
@@ -460,6 +312,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
 				Id: uint64(seedPeer.ID),
 				HostName: seedPeer.HostName,
 				Type: seedPeer.Type,
+				IsCdn: seedPeer.IsCDN,
 				Idc: seedPeer.IDC,
 				NetTopology: seedPeer.NetTopology,
 				Location: seedPeer.Location,
@@ -478,35 +331,6 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
 		}
 	}
 
-	// Deprecated: Use pbSeedPeers instead.
-	var pbCDNs []*manager.CDN
-	for _, cdnCluster := range scheduler.SchedulerCluster.CDNClusters {
-		cdnClusterConfig, err := cdnCluster.Config.MarshalJSON()
-		if err != nil {
-			return nil, status.Error(codes.DataLoss, err.Error())
-		}
-
-		for _, cdn := range cdnCluster.CDNs {
-			pbCDNs = append(pbCDNs, &manager.CDN{
-				Id: uint64(cdn.ID),
-				HostName: cdn.HostName,
-				Idc: cdn.IDC,
-				Location: cdn.Location,
-				Ip: cdn.IP,
-				Port: cdn.Port,
-				DownloadPort: cdn.DownloadPort,
-				State: cdn.State,
-				CdnClusterId: uint64(cdn.CDNClusterID),
-				CdnCluster: &manager.CDNCluster{
-					Id: uint64(cdnCluster.ID),
-					Name: cdnCluster.Name,
-					Bio: cdnCluster.BIO,
-					Config: cdnClusterConfig,
-				},
-			})
-		}
-	}
-
 	// Construct scheduler.
 	pbScheduler = manager.Scheduler{
 		Id: uint64(scheduler.ID),
@@ -526,7 +350,6 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
 			ClientConfig: schedulerClusterClientConfig,
 		},
 		SeedPeers: pbSeedPeers,
-		Cdns: pbCDNs,
 	}
 
 	// Cache data.
@@ -639,7 +462,7 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers
 	log.Infof("list scheduler clusters %v with hostInfo %#v", getSchedulerClusterNames(schedulerClusters), req.HostInfo)
 	schedulerClusters, err := s.searcher.FindSchedulerClusters(ctx, schedulerClusters, req)
 	if err != nil {
-		log.Errorf("can not matching scheduler cluster %v", err)
+		log.Error(err)
 		return nil, status.Error(codes.NotFound, "scheduler cluster not found")
 	}
 	log.Infof("find matching scheduler cluster %v", getSchedulerClusterNames(schedulerClusters))
@@ -731,26 +554,6 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error {
 		}
 	}
 
-	// Deprecated: Use SourceType_SEED_PEER_SOURCE instead.
-	if sourceType == manager.SourceType_CDN_SOURCE {
-		cdn := model.CDN{}
-		if err := s.db.First(&cdn, model.CDN{
-			HostName: hostName,
-			CDNClusterID: clusterID,
-		}).Updates(model.CDN{
-			State: model.CDNStateActive,
-		}).Error; err != nil {
-			return status.Error(codes.Unknown, err.Error())
-		}
-
-		if err := s.cache.Delete(
-			context.TODO(),
-			cache.MakeCDNCacheKey(hostName, clusterID),
-		); err != nil {
-			logger.Warnf("%s refresh keepalive status failed in cdn cluster %d", hostName, clusterID)
-		}
-	}
-
 	for {
 		_, err := stream.Recv()
 		if err != nil {
@@ -794,26 +597,6 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error {
 			}
 		}
 
-			// Deprecated: Use SourceType_SEED_PEER_SOURCE instead.
-			if sourceType == manager.SourceType_CDN_SOURCE {
-				cdn := model.CDN{}
-				if err := s.db.First(&cdn, model.CDN{
-					HostName: hostName,
-					CDNClusterID: clusterID,
-				}).Updates(model.CDN{
-					State: model.CDNStateInactive,
-				}).Error; err != nil {
-					return status.Error(codes.Unknown, err.Error())
-				}
-
-				if err := s.cache.Delete(
-					context.TODO(),
-					cache.MakeCDNCacheKey(hostName, clusterID),
-				); err != nil {
-					logger.Warnf("%s refresh keepalive status failed in cdn cluster %d", hostName, clusterID)
-				}
-			}
-
 			if err == io.EOF {
 				logger.Infof("%s close keepalive in cluster %d", hostName, clusterID)
 				return nil
@@ -110,7 +110,7 @@ func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters
 
 	clusters := FilterSchedulerClusters(conditions, schedulerClusters)
 	if len(clusters) == 0 {
-		return nil, fmt.Errorf("security domain %s does not match any scheduler cluster", conditions[ConditionSecurityDomain])
+		return nil, fmt.Errorf("conditions %#v does not match any scheduler cluster", conditions)
 	}
 
 	sort.Slice(
@@ -78,7 +78,7 @@ func TestSchedulerCluster(t *testing.T) {
 			conditions: map[string]string{"security_domain": "domain-1"},
 			expect: func(t *testing.T, data []model.SchedulerCluster, err error) {
 				assert := assert.New(t)
-				assert.EqualError(err, "security domain domain-1 does not match any scheduler cluster")
+				assert.Error(err)
 			},
 		},
 		{
@@ -33,7 +33,7 @@ func (s *service) CreateApplication(ctx context.Context, json types.CreateApplic
 		State: json.State,
 	}
 
-	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").Preload("User").Create(&application).Error; err != nil {
+	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("SchedulerClusters").Preload("User").Create(&application).Error; err != nil {
 		return nil, err
 	}
@@ -55,7 +55,7 @@ func (s *service) DestroyApplication(ctx context.Context, id uint) error {
 
 func (s *service) UpdateApplication(ctx context.Context, id uint, json types.UpdateApplicationRequest) (*model.Application, error) {
 	application := model.Application{}
-	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").Preload("User").First(&application, id).Updates(model.Application{
+	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("SchedulerClusters").Preload("User").First(&application, id).Updates(model.Application{
 		Name: json.Name,
 		DownloadRateLimit: json.DownloadRateLimit,
 		URL: json.URL,
@@ -71,7 +71,7 @@ func (s *service) UpdateApplication(ctx context.Context, id uint, json types.Upd
 
 func (s *service) GetApplication(ctx context.Context, id uint) (*model.Application, error) {
 	application := model.Application{}
-	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").Preload("User").First(&application, id).Error; err != nil {
+	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("SchedulerClusters").Preload("User").First(&application, id).Error; err != nil {
 		return nil, err
 	}
@@ -81,7 +81,7 @@ func (s *service) GetApplication(ctx context.Context, id uint) (*model.Applicati
 func (s *service) GetApplications(ctx context.Context, q types.GetApplicationsQuery) (*[]model.Application, int64, error) {
 	var count int64
 	applications := []model.Application{}
-	if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").Preload("User").Find(&applications).Count(&count).Error; err != nil {
+	if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Preload("SeedPeerClusters").Preload("SchedulerClusters").Preload("User").Find(&applications).Count(&count).Error; err != nil {
 		return nil, 0, err
 	}
@@ -159,39 +159,3 @@ func (s *service) DeleteSeedPeerClusterToApplication(ctx context.Context, id, se
 
 	return nil
 }
-
-func (s *service) AddCDNClusterToApplication(ctx context.Context, id, cdnClusterID uint) error {
-	application := model.Application{}
-	if err := s.db.WithContext(ctx).First(&application, id).Error; err != nil {
-		return err
-	}
-
-	cdnCluster := model.CDNCluster{}
-	if err := s.db.WithContext(ctx).First(&cdnCluster, cdnClusterID).Error; err != nil {
-		return err
-	}
-
-	if err := s.db.WithContext(ctx).Model(&application).Association("CDNClusters").Append(&cdnCluster); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *service) DeleteCDNClusterToApplication(ctx context.Context, id, cdnClusterID uint) error {
-	application := model.Application{}
-	if err := s.db.WithContext(ctx).First(&application, id).Error; err != nil {
-		return err
-	}
-
-	cdnCluster := model.CDNCluster{}
-	if err := s.db.WithContext(ctx).First(&cdnCluster, cdnClusterID).Error; err != nil {
-		return err
-	}
-
-	if err := s.db.Model(&application).Association("CDNClusters").Delete(&cdnCluster); err != nil {
-		return err
-	}
-
-	return nil
-}
@@ -1,98 +0,0 @@
-/*
- * Copyright 2020 The Dragonfly Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package service
-
-import (
-	"context"
-
-	"d7y.io/dragonfly/v2/manager/model"
-	"d7y.io/dragonfly/v2/manager/types"
-)
-
-func (s *service) CreateCDN(ctx context.Context, json types.CreateCDNRequest) (*model.CDN, error) {
-	cdn := model.CDN{
-		HostName: json.HostName,
-		IDC: json.IDC,
-		Location: json.Location,
-		IP: json.IP,
-		Port: json.Port,
-		DownloadPort: json.DownloadPort,
-		CDNClusterID: json.CDNClusterID,
-	}
-
-	if err := s.db.WithContext(ctx).Create(&cdn).Error; err != nil {
-		return nil, err
-	}
-
-	return &cdn, nil
-}
-
-func (s *service) DestroyCDN(ctx context.Context, id uint) error {
-	cdn := model.CDN{}
-	if err := s.db.WithContext(ctx).First(&cdn, id).Error; err != nil {
-		return err
-	}
-
-	if err := s.db.WithContext(ctx).Unscoped().Delete(&model.CDN{}, id).Error; err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *service) UpdateCDN(ctx context.Context, id uint, json types.UpdateCDNRequest) (*model.CDN, error) {
-	cdn := model.CDN{}
-	if err := s.db.WithContext(ctx).First(&cdn, id).Updates(model.CDN{
-		IDC: json.IDC,
-		Location: json.Location,
-		IP: json.IP,
-		Port: json.Port,
-		DownloadPort: json.DownloadPort,
-		CDNClusterID: json.CDNClusterID,
-	}).Error; err != nil {
-		return nil, err
-	}
-
-	return &cdn, nil
-}
-
-func (s *service) GetCDN(ctx context.Context, id uint) (*model.CDN, error) {
-	cdn := model.CDN{}
-	if err := s.db.WithContext(ctx).First(&cdn, id).Error; err != nil {
-		return nil, err
-	}
-
-	return &cdn, nil
-}
-
-func (s *service) GetCDNs(ctx context.Context, q types.GetCDNsQuery) (*[]model.CDN, int64, error) {
-	var count int64
-	var cdns []model.CDN
-	if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Where(&model.CDN{
-		HostName: q.HostName,
-		IDC: q.IDC,
-		Location: q.Location,
-		IP: q.IP,
-		Port: q.Port,
-		DownloadPort: q.DownloadPort,
-		CDNClusterID: q.CDNClusterID,
-	}).Find(&cdns).Count(&count).Error; err != nil {
-		return nil, 0, err
-	}
-
-	return &cdns, count, nil
-}
@@ -1,148 +0,0 @@
-/*
- * Copyright 2020 The Dragonfly Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package service
-
-import (
-	"context"
-	"errors"
-
-	"d7y.io/dragonfly/v2/manager/model"
-	"d7y.io/dragonfly/v2/manager/types"
-	"d7y.io/dragonfly/v2/pkg/util/structutils"
-)
-
-func (s *service) CreateCDNCluster(ctx context.Context, json types.CreateCDNClusterRequest) (*model.CDNCluster, error) {
-	config, err := structutils.StructToMap(json.Config)
-	if err != nil {
-		return nil, err
-	}
-
-	cdnCluster := model.CDNCluster{
-		Name: json.Name,
-		BIO: json.BIO,
-		Config: config,
-		IsDefault: json.IsDefault,
-	}
-
-	if err := s.db.WithContext(ctx).Create(&cdnCluster).Error; err != nil {
-		return nil, err
-	}
-
-	return &cdnCluster, nil
-}
-
-func (s *service) DestroyCDNCluster(ctx context.Context, id uint) error {
-	cdnCluster := model.CDNCluster{}
-	if err := s.db.WithContext(ctx).Preload("CDNs").First(&cdnCluster, id).Error; err != nil {
-		return err
-	}
-
-	if len(cdnCluster.CDNs) != 0 {
-		return errors.New("cdn cluster exists cdn")
-	}
-
-	if err := s.db.WithContext(ctx).Unscoped().Delete(&model.CDNCluster{}, id).Error; err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *service) UpdateCDNCluster(ctx context.Context, id uint, json types.UpdateCDNClusterRequest) (*model.CDNCluster, error) {
-	config, err := structutils.StructToMap(json.Config)
-	if err != nil {
-		return nil, err
-	}
-
-	cdnCluster := model.CDNCluster{}
-	if err := s.db.WithContext(ctx).First(&cdnCluster, id).Updates(model.CDNCluster{
-		Name: json.Name,
-		BIO: json.BIO,
-		Config: config,
-		IsDefault: json.IsDefault,
-	}).Error; err != nil {
-		return nil, err
-	}
-
-	return &cdnCluster, nil
-}
-
-func (s *service) GetCDNCluster(ctx context.Context, id uint) (*model.CDNCluster, error) {
-	cdnCluster := model.CDNCluster{}
-	if err := s.db.WithContext(ctx).First(&cdnCluster, id).Error; err != nil {
-		return nil, err
-	}
-
-	return &cdnCluster, nil
-}
-
-func (s *service) GetCDNClusters(ctx context.Context, q types.GetCDNClustersQuery) (*[]model.CDNCluster, int64, error) {
-	var count int64
-	var cdnClusters []model.CDNCluster
-	if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Where(&model.CDNCluster{
-		Name: q.Name,
-	}).Find(&cdnClusters).Count(&count).Error; err != nil {
-		return nil, 0, err
-	}
-
-	return &cdnClusters, count, nil
-}
-
-func (s *service) AddCDNToCDNCluster(ctx context.Context, id, cdnID uint) error {
-	cdnCluster := model.CDNCluster{}
-	if err := s.db.WithContext(ctx).First(&cdnCluster, id).Error; err != nil {
-		return err
-	}
-
-	cdn := model.CDN{}
-	if err := s.db.WithContext(ctx).First(&cdn, cdnID).Error; err != nil {
-		return err
-	}
-
-	if err := s.db.WithContext(ctx).Model(&cdnCluster).Association("CDNs").Append(&cdn); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *service) AddSchedulerClusterToCDNCluster(ctx context.Context, id, schedulerClusterID uint) error {
-	cdnCluster := model.CDNCluster{}
-	if err := s.db.WithContext(ctx).First(&cdnCluster, id).Error; err != nil {
-		return err
-	}
-
-	schedulerCluster := model.SchedulerCluster{}
-	if err := s.db.WithContext(ctx).First(&schedulerCluster, schedulerClusterID).Error; err != nil {
-		return err
-	}
-
-	cdnClusters := []model.CDNCluster{}
-	if err := s.db.WithContext(ctx).Model(&schedulerCluster).Association("CDNClusters").Find(&cdnClusters); err != nil {
-		return err
-	}
-
-	if err := s.db.WithContext(ctx).Model(&schedulerCluster).Association("CDNClusters").Delete(cdnClusters); err != nil {
-		return err
-	}
-
-	if err := s.db.WithContext(ctx).Model(&cdnCluster).Association("SchedulerClusters").Append(&schedulerCluster); err != nil {
-		return err
-	}
-
-	return nil
-}
@@ -155,7 +155,7 @@ func (s *service) DestroyJob(ctx context.Context, id uint) error {
 
 func (s *service) UpdateJob(ctx context.Context, id uint, json types.UpdateJobRequest) (*model.Job, error) {
 	job := model.Job{}
-	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").First(&job, id).Updates(model.Job{
+	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("SchedulerClusters").First(&job, id).Updates(model.Job{
 		BIO: json.BIO,
 		UserID: json.UserID,
 	}).Error; err != nil {
@@ -231,25 +231,3 @@ func (s *service) AddJobToSeedPeerClusters(ctx context.Context, id, seedPeerClus
 
 	return nil
 }
-
-func (s *service) AddJobToCDNClusters(ctx context.Context, id, cdnClusterIDs []uint) error {
-	job := model.Job{}
-	if err := s.db.WithContext(ctx).First(&job, id).Error; err != nil {
-		return err
-	}
-
-	var cdnClusters []*model.CDNCluster
-	for _, cdnClusterID := range cdnClusterIDs {
-		cdnCluster := model.CDNCluster{}
-		if err := s.db.WithContext(ctx).First(&cdnCluster, cdnClusterID).Error; err != nil {
-			return err
-		}
-		cdnClusters = append(cdnClusters, &cdnCluster)
-	}
-
-	if err := s.db.WithContext(ctx).Model(&job).Association("CDNClusters").Append(cdnClusters); err != nil {
-		return err
-	}
-
-	return nil
-}
@@ -60,12 +60,6 @@ func (s *service) CreateSchedulerCluster(ctx context.Context, json types.CreateS
 		}
 	}
 
-	if json.CDNClusterID > 0 {
-		if err := s.AddSchedulerClusterToCDNCluster(ctx, json.CDNClusterID, schedulerCluster.ID); err != nil {
-			return nil, err
-		}
-	}
-
 	return &schedulerCluster, nil
 }
@@ -120,18 +114,12 @@ func (s *service) UpdateSchedulerCluster(ctx context.Context, id uint, json type
 		}
 	}
 
-	if json.CDNClusterID > 0 {
-		if err := s.AddSchedulerClusterToCDNCluster(ctx, json.CDNClusterID, schedulerCluster.ID); err != nil {
-			return nil, err
-		}
-	}
-
 	return &schedulerCluster, nil
 }
 
 func (s *service) GetSchedulerCluster(ctx context.Context, id uint) (*model.SchedulerCluster, error) {
 	schedulerCluster := model.SchedulerCluster{}
-	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").First(&schedulerCluster, id).Error; err != nil {
+	if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").First(&schedulerCluster, id).Error; err != nil {
 		return nil, err
 	}
@@ -143,7 +131,7 @@ func (s *service) GetSchedulerClusters(ctx context.Context, q types.GetScheduler
 	var schedulerClusters []model.SchedulerCluster
 	if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Where(&model.SchedulerCluster{
 		Name: q.Name,
-	}).Preload("SeedPeerClusters").Preload("CDNClusters").Find(&schedulerClusters).Count(&count).Error; err != nil {
+	}).Preload("SeedPeerClusters").Find(&schedulerClusters).Count(&count).Error; err != nil {
 		return nil, 0, err
 	}
@@ -118,24 +118,6 @@ func (s *service) AddSeedPeerClusterToSecurityGroup(ctx context.Context, id, see
 	return nil
 }
 
-func (s *service) AddCDNClusterToSecurityGroup(ctx context.Context, id, cdnClusterID uint) error {
-	securityGroup := model.SecurityGroup{}
-	if err := s.db.WithContext(ctx).First(&securityGroup, id).Error; err != nil {
-		return err
-	}
-
-	cdnCluster := model.CDNCluster{}
-	if err := s.db.WithContext(ctx).First(&cdnCluster, cdnClusterID).Error; err != nil {
-		return err
-	}
-
-	if err := s.db.WithContext(ctx).Model(&securityGroup).Association("CDNClusters").Append(&cdnCluster); err != nil {
-		return err
-	}
-
-	return nil
-}
-
 func (s *service) AddSecurityRuleToSecurityGroup(ctx context.Context, id, securityRuleID uint) error {
 	securityGroup := model.SecurityGroup{}
 	if err := s.db.WithContext(ctx).First(&securityGroup, id).Error; err != nil {
@@ -74,20 +74,6 @@ type Service interface {
 	GetSeedPeer(context.Context, uint) (*model.SeedPeer, error)
 	GetSeedPeers(context.Context, types.GetSeedPeersQuery) (*[]model.SeedPeer, int64, error)
 
-	CreateCDNCluster(context.Context, types.CreateCDNClusterRequest) (*model.CDNCluster, error)
-	DestroyCDNCluster(context.Context, uint) error
-	UpdateCDNCluster(context.Context, uint, types.UpdateCDNClusterRequest) (*model.CDNCluster, error)
-	GetCDNCluster(context.Context, uint) (*model.CDNCluster, error)
-	GetCDNClusters(context.Context, types.GetCDNClustersQuery) (*[]model.CDNCluster, int64, error)
-	AddCDNToCDNCluster(context.Context, uint, uint) error
-	AddSchedulerClusterToCDNCluster(context.Context, uint, uint) error
-
-	CreateCDN(context.Context, types.CreateCDNRequest) (*model.CDN, error)
-	DestroyCDN(context.Context, uint) error
-	UpdateCDN(context.Context, uint, types.UpdateCDNRequest) (*model.CDN, error)
-	GetCDN(context.Context, uint) (*model.CDN, error)
-	GetCDNs(context.Context, types.GetCDNsQuery) (*[]model.CDN, int64, error)
-
 	CreateSchedulerCluster(context.Context, types.CreateSchedulerClusterRequest) (*model.SchedulerCluster, error)
 	DestroySchedulerCluster(context.Context, uint) error
 	UpdateSchedulerCluster(context.Context, uint, types.UpdateSchedulerClusterRequest) (*model.SchedulerCluster, error)
@@ -114,7 +100,6 @@ type Service interface {
 	GetSecurityGroups(context.Context, types.GetSecurityGroupsQuery) (*[]model.SecurityGroup, int64, error)
 	AddSchedulerClusterToSecurityGroup(context.Context, uint, uint) error
 	AddSeedPeerClusterToSecurityGroup(context.Context, uint, uint) error
-	AddCDNClusterToSecurityGroup(context.Context, uint, uint) error
 	AddSecurityRuleToSecurityGroup(context.Context, uint, uint) error
 	DestroySecurityRuleToSecurityGroup(context.Context, uint, uint) error
@@ -142,8 +127,6 @@ type Service interface {
 	DeleteSchedulerClusterToApplication(context.Context, uint, uint) error
 	AddSeedPeerClusterToApplication(context.Context, uint, uint) error
 	DeleteSeedPeerClusterToApplication(context.Context, uint, uint) error
-	AddCDNClusterToApplication(context.Context, uint, uint) error
-	DeleteCDNClusterToApplication(context.Context, uint, uint) error
 }
 
 type service struct {
@@ -40,16 +40,6 @@ type DeleteSeedPeerClusterToApplicationParams struct {
 	SeedPeerClusterID uint `uri:"seed_peer_cluster_id" binding:"required"`
 }
 
-type AddCDNClusterToApplicationParams struct {
-	ID uint `uri:"id" binding:"required"`
-	CDNClusterID uint `uri:"cdn_cluster_id" binding:"required"`
-}
-
-type DeleteCDNClusterToApplicationParams struct {
-	ID uint `uri:"id" binding:"required"`
-	CDNClusterID uint `uri:"cdn_cluster_id" binding:"required"`
-}
-
 type CreateApplicationRequest struct {
 	Name string `json:"name" binding:"required"`
 	BIO string `json:"bio" binding:"omitempty"`
@@ -1,53 +0,0 @@
-/*
- * Copyright 2020 The Dragonfly Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package types
-
-type CDNParams struct {
-	ID uint `uri:"id" binding:"required"`
-}
-
-type CreateCDNRequest struct {
-	HostName string `json:"host_name" binding:"required"`
-	IDC string `json:"idc" binding:"omitempty"`
-	Location string `json:"location" binding:"omitempty"`
-	IP string `json:"ip" binding:"required"`
-	Port int32 `json:"port" binding:"required"`
-	DownloadPort int32 `json:"download_port" binding:"required"`
-	CDNClusterID uint `json:"cdn_cluster_id" binding:"required"`
-}
-
-type UpdateCDNRequest struct {
-	IDC string `json:"idc" binding:"omitempty"`
-	Location string `json:"location" binding:"omitempty"`
-	IP string `json:"ip" binding:"omitempty"`
-	Port int32 `json:"port" binding:"omitempty"`
-	DownloadPort int32 `json:"download_port" binding:"omitempty"`
-	CDNClusterID uint `json:"cdn_cluster_id" binding:"omitempty"`
-}
-
-type GetCDNsQuery struct {
-	HostName string `form:"host_name" binding:"omitempty"`
-	IDC string `form:"idc" binding:"omitempty"`
-	Location string `form:"location" binding:"omitempty"`
-	IP string `form:"ip" binding:"omitempty"`
-	Port int32 `form:"port" binding:"omitempty"`
-	DownloadPort int32 `form:"download_port" binding:"omitempty"`
-	CDNClusterID uint `form:"cdn_cluster_id" binding:"omitempty"`
-	Page int `form:"page" binding:"omitempty,gte=1"`
-	PerPage int `form:"per_page" binding:"omitempty,gte=1,lte=50"`
-	State string `form:"state" binding:"omitempty,oneof=active inactive"`
-}
@@ -1,56 +0,0 @@
-/*
- * Copyright 2020 The Dragonfly Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package types
-
-type CDNClusterParams struct {
-	ID uint `uri:"id" binding:"required"`
-}
-
-type AddCDNToCDNClusterParams struct {
-	ID uint `uri:"id" binding:"required"`
-	CDNID uint `uri:"cdn_id" binding:"required"`
-}
-
-type AddSchedulerClusterToCDNClusterParams struct {
-	ID uint `uri:"id" binding:"required"`
-	SchedulerClusterID uint `uri:"scheduler_cluster_id" binding:"required"`
-}
-
-type CreateCDNClusterRequest struct {
-	Name string `json:"name" binding:"required"`
-	BIO string `json:"bio" binding:"omitempty"`
-	Config *CDNClusterConfig `json:"config" binding:"required"`
-	IsDefault bool `json:"is_default" binding:"omitempty"`
-}
-
-type UpdateCDNClusterRequest struct {
-	Name string `json:"name" binding:"omitempty"`
-	BIO string `json:"bio" binding:"omitempty"`
-	Config *CDNClusterConfig `json:"config" binding:"omitempty"`
-	IsDefault bool `json:"is_default" binding:"omitempty"`
-}
-
-type GetCDNClustersQuery struct {
-	Name string `form:"name" binding:"omitempty"`
-	Page int `form:"page" binding:"omitempty,gte=1"`
-	PerPage int `form:"per_page" binding:"omitempty,gte=1,lte=50"`
-}
-
-type CDNClusterConfig struct {
-	LoadLimit uint32 `yaml:"loadLimit" mapstructure:"loadLimit" json:"load_limit" binding:"omitempty,gte=1,lte=5000"`
-	NetTopology string `yaml:"netTopology" mapstructure:"netTopology" json:"net_topology"`
-}
@@ -23,7 +23,6 @@ type CreateJobRequest struct {
 	Result map[string]interface{} `json:"result" binding:"omitempty"`
 	UserID uint `json:"user_id" binding:"omitempty"`
 	SeedPeerClusterIDs []uint `json:"seed_peer_cluster_ids" binding:"omitempty"`
-	CDNClusterIDs []uint `json:"cdn_cluster_ids" binding:"omitempty"`
 	SchedulerClusterIDs []uint `json:"scheduler_cluster_ids" binding:"omitempty"`
 }
@@ -33,7 +33,6 @@ type CreateSchedulerClusterRequest struct {
 	Scopes *SchedulerClusterScopes `json:"scopes" binding:"omitempty"`
 	IsDefault bool `json:"is_default" binding:"omitempty"`
 	SeedPeerClusterID uint `json:"seed_peer_cluster_id" binding:"omitempty"`
-	CDNClusterID uint `json:"cdn_cluster_id" binding:"omitempty"`
 }
 
 type UpdateSchedulerClusterRequest struct {
@@ -44,7 +43,6 @@ type UpdateSchedulerClusterRequest struct {
 	Scopes *SchedulerClusterScopes `json:"scopes" binding:"omitempty"`
 	IsDefault bool `json:"is_default" binding:"omitempty"`
 	SeedPeerClusterID uint `json:"seed_peer_cluster_id" binding:"omitempty"`
-	CDNClusterID uint `json:"cdn_cluster_id" binding:"omitempty"`
 }
 
 type GetSchedulerClustersQuery struct {
@@ -30,11 +30,6 @@ type AddSeedPeerClusterToSecurityGroupParams struct {
 	SeedPeerClusterID uint `uri:"seed_peer_cluster_id" binding:"required"`
 }
 
-type AddCDNClusterToSecurityGroupParams struct {
-	ID uint `uri:"id" binding:"required"`
-	CDNClusterID uint `uri:"cdn_cluster_id" binding:"required"`
-}
-
 type AddSecurityRuleToSecurityGroupParams struct {
 	ID uint `uri:"id" binding:"required"`
 	SecurityRuleID uint `uri:"security_rule_id" binding:"required"`
@@ -22,7 +22,7 @@ type SeedPeerParams struct {
 
 type CreateSeedPeerRequest struct {
 	HostName string `json:"host_name" binding:"required"`
-	Type string `json:"type" binding:"required"`
+	Type string `json:"type" binding:"required,oneof=super strong weak"`
 	IDC string `json:"idc" binding:"omitempty"`
 	NetTopology string `json:"net_topology" binding:"omitempty"`
 	Location string `json:"location" binding:"omitempty"`
@@ -33,7 +33,7 @@ type CreateSeedPeerRequest struct {
 }
 
 type UpdateSeedPeerRequest struct {
-	Type string `json:"type" binding:"omitempty"`
+	Type string `json:"type" binding:"omitempty,oneof=super strong weak"`
 	IDC string `json:"idc" binding:"omitempty"`
 	NetTopology string `json:"net_topology" binding:"omitempty"`
 	Location string `json:"location" binding:"omitempty"`
@ -45,7 +45,7 @@ type UpdateSeedPeerRequest struct {
|
||||||
|
|
||||||
type GetSeedPeersQuery struct {
|
type GetSeedPeersQuery struct {
|
||||||
HostName string `form:"host_name" binding:"omitempty"`
|
HostName string `form:"host_name" binding:"omitempty"`
|
||||||
Type string `form:"type" binding:"omitempty"`
|
Type string `form:"type" binding:"omitempty,oneof=super strong weak"`
|
||||||
IDC string `form:"idc" binding:"omitempty"`
|
IDC string `form:"idc" binding:"omitempty"`
|
||||||
Location string `form:"location" binding:"omitempty"`
|
Location string `form:"location" binding:"omitempty"`
|
||||||
IP string `form:"ip" binding:"omitempty"`
|
IP string `form:"ip" binding:"omitempty"`
|
||||||
|
|
|
||||||
|
|
@ -24,10 +24,6 @@ func HostID(hostname string, port int32) string {
|
||||||
return fmt.Sprintf("%s-%d", hostname, port)
|
return fmt.Sprintf("%s-%d", hostname, port)
|
||||||
}
|
}
|
||||||
|
|
||||||
func SeedHostID(hostname string, port int32) string {
|
|
||||||
return fmt.Sprintf("%s_Seed", HostID(hostname, port))
|
|
||||||
}
|
|
||||||
|
|
||||||
func CDNHostID(hostname string, port int32) string {
|
func CDNHostID(hostname string, port int32) string {
|
||||||
return fmt.Sprintf("%s_CDN", HostID(hostname, port))
|
return fmt.Sprintf("%s_CDN", HostID(hostname, port))
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -65,49 +65,6 @@ func TestHostID(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSeedHostID(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
hostname string
|
|
||||||
port int32
|
|
||||||
expect func(t *testing.T, d string)
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "generate SeedHostID with ipv4",
|
|
||||||
hostname: "foo",
|
|
||||||
port: 8000,
|
|
||||||
expect: func(t *testing.T, d string) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
assert.Equal(d, "foo-8000_Seed")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "generate SeedHostID with empty host",
|
|
||||||
hostname: "",
|
|
||||||
port: 8000,
|
|
||||||
expect: func(t *testing.T, d string) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
assert.Equal(d, "-8000_Seed")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "generate SeedHostID with zero port",
|
|
||||||
hostname: "foo",
|
|
||||||
port: 0,
|
|
||||||
expect: func(t *testing.T, d string) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
assert.Equal(d, "foo-0_Seed")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
tc.expect(t, SeedHostID(tc.hostname, tc.port))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCDNHostID(t *testing.T) {
|
func TestCDNHostID(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
|
|
|
||||||
|
|
@ -113,8 +113,8 @@ type PieceSeed struct {
|
||||||
|
|
||||||
// peer id for cdn node, need suffix with _CDN
|
// peer id for cdn node, need suffix with _CDN
|
||||||
PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
|
PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
|
||||||
// cdn node host uuid
|
// cdn host id
|
||||||
HostUuid string `protobuf:"bytes,3,opt,name=host_uuid,json=hostUuid,proto3" json:"host_uuid,omitempty"`
|
HostId string `protobuf:"bytes,3,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"`
|
||||||
PieceInfo *base.PieceInfo `protobuf:"bytes,4,opt,name=piece_info,json=pieceInfo,proto3" json:"piece_info,omitempty"`
|
PieceInfo *base.PieceInfo `protobuf:"bytes,4,opt,name=piece_info,json=pieceInfo,proto3" json:"piece_info,omitempty"`
|
||||||
// whether or not all seeds are downloaded
|
// whether or not all seeds are downloaded
|
||||||
Done bool `protobuf:"varint,5,opt,name=done,proto3" json:"done,omitempty"`
|
Done bool `protobuf:"varint,5,opt,name=done,proto3" json:"done,omitempty"`
|
||||||
|
|
@ -167,9 +167,9 @@ func (x *PieceSeed) GetPeerId() string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *PieceSeed) GetHostUuid() string {
|
func (x *PieceSeed) GetHostId() string {
|
||||||
if x != nil {
|
if x != nil {
|
||||||
return x.HostUuid
|
return x.HostId
|
||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
@ -232,41 +232,41 @@ var file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc = []byte{
|
||||||
0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, 0x01, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x28, 0x0a,
|
0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, 0x01, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x28, 0x0a,
|
||||||
0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||||
0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07,
|
0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07,
|
||||||
0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xa4, 0x02, 0x0a, 0x09, 0x50, 0x69, 0x65, 0x63,
|
0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xa0, 0x02, 0x0a, 0x09, 0x50, 0x69, 0x65, 0x63,
|
||||||
0x65, 0x53, 0x65, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64,
|
0x65, 0x53, 0x65, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64,
|
||||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52,
|
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52,
|
||||||
0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f,
|
0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f,
|
||||||
0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
|
0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
|
||||||
0x02, 0x10, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x2e, 0x0a,
|
0x01, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x70, 0x69, 0x65,
|
||||||
0x0a, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28,
|
0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
|
||||||
0x0b, 0x32, 0x0f, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e,
|
0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09,
|
||||||
0x66, 0x6f, 0x52, 0x09, 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a,
|
0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e,
|
||||||
0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e,
|
0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x25, 0x0a,
|
||||||
0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e,
|
0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18,
|
||||||
0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65,
|
0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65,
|
||||||
0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61,
|
0x6e, 0x67, 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69,
|
||||||
0x6c, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20,
|
0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52,
|
||||||
0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43,
|
0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
|
||||||
0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x74, 0x69,
|
0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08,
|
||||||
0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54,
|
0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12,
|
||||||
0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
|
0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
|
||||||
0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x32, 0xc4,
|
0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x32, 0xc4, 0x01, 0x0a, 0x06, 0x53,
|
||||||
0x01, 0x0a, 0x06, 0x53, 0x65, 0x65, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x4f, 0x62, 0x74,
|
0x65, 0x65, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x4f, 0x62, 0x74, 0x61, 0x69, 0x6e, 0x53,
|
||||||
0x61, 0x69, 0x6e, 0x53, 0x65, 0x65, 0x64, 0x73, 0x12, 0x16, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79,
|
0x65, 0x65, 0x64, 0x73, 0x12, 0x16, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d,
|
||||||
0x73, 0x74, 0x65, 0x6d, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
0x2e, 0x53, 0x65, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x63,
|
||||||
0x1a, 0x14, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x50, 0x69, 0x65,
|
0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x53, 0x65,
|
||||||
0x63, 0x65, 0x53, 0x65, 0x65, 0x64, 0x30, 0x01, 0x12, 0x3a, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50,
|
0x65, 0x64, 0x30, 0x01, 0x12, 0x3a, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x69, 0x65, 0x63, 0x65,
|
||||||
0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65,
|
0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65,
|
||||||
0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e,
|
||||||
0x74, 0x1a, 0x11, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61,
|
0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74,
|
||||||
0x63, 0x6b, 0x65, 0x74, 0x12, 0x3f, 0x0a, 0x0e, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x69, 0x65, 0x63,
|
0x12, 0x3f, 0x0a, 0x0e, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73,
|
||||||
0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69,
|
0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54,
|
||||||
0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11,
|
0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x62, 0x61, 0x73,
|
||||||
0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65,
|
0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x28, 0x01, 0x30,
|
||||||
0x74, 0x28, 0x01, 0x30, 0x01, 0x42, 0x27, 0x5a, 0x25, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f,
|
0x01, 0x42, 0x27, 0x5a, 0x25, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x72, 0x61, 0x67,
|
||||||
0x64, 0x72, 0x61, 0x67, 0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67,
|
0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63,
|
||||||
0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x62, 0x06,
|
0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
0x6f, 0x33,
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|
|
||||||
|
|
@ -142,9 +142,9 @@ func (m *PieceSeed) Validate() error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if utf8.RuneCountInString(m.GetHostUuid()) < 1 {
|
if utf8.RuneCountInString(m.GetHostId()) < 1 {
|
||||||
return PieceSeedValidationError{
|
return PieceSeedValidationError{
|
||||||
field: "HostUuid",
|
field: "HostId",
|
||||||
reason: "value length must be at least 1 runes",
|
reason: "value length must be at least 1 runes",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -34,8 +34,8 @@ message SeedRequest{
|
||||||
message PieceSeed{
|
message PieceSeed{
|
||||||
// peer id for cdn node, need suffix with _CDN
|
// peer id for cdn node, need suffix with _CDN
|
||||||
string peer_id = 2 [(validate.rules).string.min_len = 1];
|
string peer_id = 2 [(validate.rules).string.min_len = 1];
|
||||||
// cdn node host uuid
|
// cdn host id
|
||||||
string host_uuid = 3 [(validate.rules).string.min_len = 1];
|
string host_id = 3 [(validate.rules).string.min_len = 1];
|
||||||
base.PieceInfo piece_info = 4;
|
base.PieceInfo piece_info = 4;
|
||||||
|
|
||||||
// whether or not all seeds are downloaded
|
// whether or not all seeds are downloaded
|
||||||
|
|
|
||||||
|
|
@ -31,17 +31,13 @@ import (
|
||||||
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
|
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
|
||||||
)
|
)
|
||||||
|
|
||||||
func GetClientByAddr(addrs []dfnet.NetAddr, opts ...grpc.DialOption) (CdnClient, error) {
|
func GetClientByAddr(addrs []dfnet.NetAddr, opts ...grpc.DialOption) CdnClient {
|
||||||
if len(addrs) == 0 {
|
return &cdnClient{
|
||||||
return nil, errors.New("address list of cdn is empty")
|
|
||||||
}
|
|
||||||
cc := &cdnClient{
|
|
||||||
rpc.NewConnection(context.Background(), "cdn", addrs, []rpc.ConnOption{
|
rpc.NewConnection(context.Background(), "cdn", addrs, []rpc.ConnOption{
|
||||||
rpc.WithConnExpireTime(60 * time.Second),
|
rpc.WithConnExpireTime(60 * time.Second),
|
||||||
rpc.WithDialOption(opts),
|
rpc.WithDialOption(opts),
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
return cc, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var once sync.Once
|
var once sync.Once
|
||||||
|
|
|
||||||
|
|
@ -28,7 +28,9 @@ import (
|
||||||
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/backoff"
|
"google.golang.org/grpc/backoff"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials/insecure"
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
logger "d7y.io/dragonfly/v2/internal/dflog"
|
logger "d7y.io/dragonfly/v2/internal/dflog"
|
||||||
"d7y.io/dragonfly/v2/pkg/dfnet"
|
"d7y.io/dragonfly/v2/pkg/dfnet"
|
||||||
|
|
@ -48,9 +50,6 @@ type Client interface {
|
||||||
// Update Seed peer configuration.
|
// Update Seed peer configuration.
|
||||||
UpdateSeedPeer(*manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error)
|
UpdateSeedPeer(*manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error)
|
||||||
|
|
||||||
// Update CDN configuration.
|
|
||||||
UpdateCDN(*manager.UpdateCDNRequest) (*manager.CDN, error)
|
|
||||||
|
|
||||||
// Get Scheduler and Scheduler cluster configuration.
|
// Get Scheduler and Scheduler cluster configuration.
|
||||||
GetScheduler(*manager.GetSchedulerRequest) (*manager.Scheduler, error)
|
GetScheduler(*manager.GetSchedulerRequest) (*manager.Scheduler, error)
|
||||||
|
|
||||||
|
|
@ -120,13 +119,6 @@ func (c *client) UpdateSeedPeer(req *manager.UpdateSeedPeerRequest) (*manager.Se
|
||||||
return c.ManagerClient.UpdateSeedPeer(ctx, req)
|
return c.ManagerClient.UpdateSeedPeer(ctx, req)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *client) UpdateCDN(req *manager.UpdateCDNRequest) (*manager.CDN, error) {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
return c.ManagerClient.UpdateCDN(ctx, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *client) GetScheduler(req *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
|
func (c *client) GetScheduler(req *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
@ -153,6 +145,12 @@ retry:
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
stream, err := c.ManagerClient.KeepAlive(ctx)
|
stream, err := c.ManagerClient.KeepAlive(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if status.Code(err) == codes.Canceled {
|
||||||
|
logger.Infof("hostname %s cluster id %d stop keepalive", keepalive.HostName, keepalive.ClusterId)
|
||||||
|
cancel()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
time.Sleep(interval)
|
time.Sleep(interval)
|
||||||
cancel()
|
cancel()
|
||||||
goto retry
|
goto retry
|
||||||
|
|
@ -168,7 +166,7 @@ retry:
|
||||||
ClusterId: keepalive.ClusterId,
|
ClusterId: keepalive.ClusterId,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
if _, err := stream.CloseAndRecv(); err != nil {
|
if _, err := stream.CloseAndRecv(); err != nil {
|
||||||
logger.Errorf("hostname %s cluster id %v close and recv stream failed: %v", keepalive.HostName, keepalive.ClusterId, err)
|
logger.Errorf("hostname %s cluster id %d close and recv stream failed: %v", keepalive.HostName, keepalive.ClusterId, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cancel()
|
cancel()
|
||||||
|
|
|
||||||
|
|
@ -91,21 +91,6 @@ func (mr *MockClientMockRecorder) ListSchedulers(arg0 interface{}) *gomock.Call
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockClient)(nil).ListSchedulers), arg0)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockClient)(nil).ListSchedulers), arg0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateCDN mocks base method.
|
|
||||||
func (m *MockClient) UpdateCDN(arg0 *manager.UpdateCDNRequest) (*manager.CDN, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "UpdateCDN", arg0)
|
|
||||||
ret0, _ := ret[0].(*manager.CDN)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateCDN indicates an expected call of UpdateCDN.
|
|
||||||
func (mr *MockClientMockRecorder) UpdateCDN(arg0 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockClient)(nil).UpdateCDN), arg0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateScheduler mocks base method.
|
// UpdateScheduler mocks base method.
|
||||||
func (m *MockClient) UpdateScheduler(arg0 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
|
func (m *MockClient) UpdateScheduler(arg0 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -108,472 +108,6 @@ var _ interface {
|
||||||
ErrorName() string
|
ErrorName() string
|
||||||
} = SecurityGroupValidationError{}
|
} = SecurityGroupValidationError{}
|
||||||
|
|
||||||
// Validate checks the field values on CDNCluster with the rules defined in the
|
|
||||||
// proto definition for this message. If any rules are violated, an error is returned.
|
|
||||||
func (m *CDNCluster) Validate() error {
|
|
||||||
if m == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// no validation rules for Id
|
|
||||||
|
|
||||||
// no validation rules for Name
|
|
||||||
|
|
||||||
// no validation rules for Bio
|
|
||||||
|
|
||||||
// no validation rules for Config
|
|
||||||
|
|
||||||
if v, ok := interface{}(m.GetSecurityGroup()).(interface{ Validate() error }); ok {
|
|
||||||
if err := v.Validate(); err != nil {
|
|
||||||
return CDNClusterValidationError{
|
|
||||||
field: "SecurityGroup",
|
|
||||||
reason: "embedded message failed validation",
|
|
||||||
cause: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CDNClusterValidationError is the validation error returned by
|
|
||||||
// CDNCluster.Validate if the designated constraints aren't met.
|
|
||||||
type CDNClusterValidationError struct {
|
|
||||||
field string
|
|
||||||
reason string
|
|
||||||
cause error
|
|
||||||
key bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Field function returns field value.
|
|
||||||
func (e CDNClusterValidationError) Field() string { return e.field }
|
|
||||||
|
|
||||||
// Reason function returns reason value.
|
|
||||||
func (e CDNClusterValidationError) Reason() string { return e.reason }
|
|
||||||
|
|
||||||
// Cause function returns cause value.
|
|
||||||
func (e CDNClusterValidationError) Cause() error { return e.cause }
|
|
||||||
|
|
||||||
// Key function returns key value.
|
|
||||||
func (e CDNClusterValidationError) Key() bool { return e.key }
|
|
||||||
|
|
||||||
// ErrorName returns error name.
|
|
||||||
func (e CDNClusterValidationError) ErrorName() string { return "CDNClusterValidationError" }
|
|
||||||
|
|
||||||
// Error satisfies the builtin error interface
|
|
||||||
func (e CDNClusterValidationError) Error() string {
|
|
||||||
cause := ""
|
|
||||||
if e.cause != nil {
|
|
||||||
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
|
||||||
}
|
|
||||||
|
|
||||||
key := ""
|
|
||||||
if e.key {
|
|
||||||
key = "key for "
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"invalid %sCDNCluster.%s: %s%s",
|
|
||||||
key,
|
|
||||||
e.field,
|
|
||||||
e.reason,
|
|
||||||
cause)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ error = CDNClusterValidationError{}
|
|
||||||
|
|
||||||
var _ interface {
|
|
||||||
Field() string
|
|
||||||
Reason() string
|
|
||||||
Key() bool
|
|
||||||
Cause() error
|
|
||||||
ErrorName() string
|
|
||||||
} = CDNClusterValidationError{}
|
|
||||||
|
|
||||||
// Validate checks the field values on CDN with the rules defined in the proto
|
|
||||||
// definition for this message. If any rules are violated, an error is returned.
|
|
||||||
func (m *CDN) Validate() error {
|
|
||||||
if m == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// no validation rules for Id
|
|
||||||
|
|
||||||
// no validation rules for HostName
|
|
||||||
|
|
||||||
// no validation rules for Idc
|
|
||||||
|
|
||||||
// no validation rules for Location
|
|
||||||
|
|
||||||
// no validation rules for Ip
|
|
||||||
|
|
||||||
// no validation rules for Port
|
|
||||||
|
|
||||||
// no validation rules for DownloadPort
|
|
||||||
|
|
||||||
// no validation rules for State
|
|
||||||
|
|
||||||
// no validation rules for CdnClusterId
|
|
||||||
|
|
||||||
if v, ok := interface{}(m.GetCdnCluster()).(interface{ Validate() error }); ok {
|
|
||||||
if err := v.Validate(); err != nil {
|
|
||||||
return CDNValidationError{
|
|
||||||
field: "CdnCluster",
|
|
||||||
reason: "embedded message failed validation",
|
|
||||||
cause: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for idx, item := range m.GetSchedulers() {
|
|
||||||
_, _ = idx, item
|
|
||||||
|
|
||||||
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
|
|
||||||
if err := v.Validate(); err != nil {
|
|
||||||
return CDNValidationError{
|
|
||||||
field: fmt.Sprintf("Schedulers[%v]", idx),
|
|
||||||
reason: "embedded message failed validation",
|
|
||||||
cause: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CDNValidationError is the validation error returned by CDN.Validate if the
|
|
||||||
// designated constraints aren't met.
|
|
||||||
type CDNValidationError struct {
|
|
||||||
field string
|
|
||||||
reason string
|
|
||||||
cause error
|
|
||||||
key bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Field function returns field value.
|
|
||||||
func (e CDNValidationError) Field() string { return e.field }
|
|
||||||
|
|
||||||
// Reason function returns reason value.
|
|
||||||
func (e CDNValidationError) Reason() string { return e.reason }
|
|
||||||
|
|
||||||
// Cause function returns cause value.
|
|
||||||
func (e CDNValidationError) Cause() error { return e.cause }
|
|
||||||
|
|
||||||
// Key function returns key value.
|
|
||||||
func (e CDNValidationError) Key() bool { return e.key }
|
|
||||||
|
|
||||||
// ErrorName returns error name.
|
|
||||||
func (e CDNValidationError) ErrorName() string { return "CDNValidationError" }
|
|
||||||
|
|
||||||
// Error satisfies the builtin error interface
|
|
||||||
func (e CDNValidationError) Error() string {
|
|
||||||
cause := ""
|
|
||||||
if e.cause != nil {
|
|
||||||
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
|
||||||
}
|
|
||||||
|
|
||||||
key := ""
|
|
||||||
if e.key {
|
|
||||||
key = "key for "
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"invalid %sCDN.%s: %s%s",
|
|
||||||
key,
|
|
||||||
e.field,
|
|
||||||
e.reason,
|
|
||||||
cause)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ error = CDNValidationError{}
|
|
||||||
|
|
||||||
var _ interface {
|
|
||||||
Field() string
|
|
||||||
Reason() string
|
|
||||||
Key() bool
|
|
||||||
Cause() error
|
|
||||||
ErrorName() string
|
|
||||||
} = CDNValidationError{}
|
|
||||||
|
|
||||||
// Validate checks the field values on GetCDNRequest with the rules defined in
|
|
||||||
// the proto definition for this message. If any rules are violated, an error
|
|
||||||
// is returned.
|
|
||||||
func (m *GetCDNRequest) Validate() error {
|
|
||||||
if m == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok {
|
|
||||||
return GetCDNRequestValidationError{
|
|
||||||
field: "SourceType",
|
|
||||||
reason: "value must be one of the defined enum values",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m._validateHostname(m.GetHostName()); err != nil {
|
|
||||||
return GetCDNRequestValidationError{
|
|
||||||
field: "HostName",
|
|
||||||
reason: "value must be a valid hostname",
|
|
||||||
cause: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.GetCdnClusterId() < 1 {
|
|
||||||
return GetCDNRequestValidationError{
|
|
||||||
field: "CdnClusterId",
|
|
||||||
reason: "value must be greater than or equal to 1",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *GetCDNRequest) _validateHostname(host string) error {
|
|
||||||
s := strings.ToLower(strings.TrimSuffix(host, "."))
|
|
||||||
|
|
||||||
if len(host) > 253 {
|
|
||||||
return errors.New("hostname cannot exceed 253 characters")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, part := range strings.Split(s, ".") {
|
|
||||||
if l := len(part); l == 0 || l > 63 {
|
|
||||||
return errors.New("hostname part must be non-empty and cannot exceed 63 characters")
|
|
||||||
}
|
|
||||||
|
|
||||||
if part[0] == '-' {
|
|
||||||
return errors.New("hostname parts cannot begin with hyphens")
|
|
||||||
}
|
|
||||||
|
|
||||||
if part[len(part)-1] == '-' {
|
|
||||||
return errors.New("hostname parts cannot end with hyphens")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, r := range part {
|
|
||||||
if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' {
|
|
||||||
return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCDNRequestValidationError is the validation error returned by
|
|
||||||
// GetCDNRequest.Validate if the designated constraints aren't met.
|
|
||||||
type GetCDNRequestValidationError struct {
|
|
||||||
field string
|
|
||||||
reason string
|
|
||||||
cause error
|
|
||||||
key bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Field function returns field value.
|
|
||||||
func (e GetCDNRequestValidationError) Field() string { return e.field }
|
|
||||||
|
|
||||||
// Reason function returns reason value.
|
|
||||||
func (e GetCDNRequestValidationError) Reason() string { return e.reason }
|
|
||||||
|
|
||||||
// Cause function returns cause value.
|
|
||||||
func (e GetCDNRequestValidationError) Cause() error { return e.cause }
|
|
||||||
|
|
||||||
// Key function returns key value.
|
|
||||||
func (e GetCDNRequestValidationError) Key() bool { return e.key }
|
|
||||||
|
|
||||||
// ErrorName returns error name.
|
|
||||||
func (e GetCDNRequestValidationError) ErrorName() string { return "GetCDNRequestValidationError" }
|
|
||||||
|
|
||||||
// Error satisfies the builtin error interface
|
|
||||||
func (e GetCDNRequestValidationError) Error() string {
|
|
||||||
cause := ""
|
|
||||||
if e.cause != nil {
|
|
||||||
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
|
||||||
}
|
|
||||||
|
|
||||||
key := ""
|
|
||||||
if e.key {
|
|
||||||
key = "key for "
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"invalid %sGetCDNRequest.%s: %s%s",
|
|
||||||
key,
|
|
||||||
e.field,
|
|
||||||
e.reason,
|
|
||||||
cause)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ error = GetCDNRequestValidationError{}
|
|
||||||
|
|
||||||
var _ interface {
|
|
||||||
Field() string
|
|
||||||
Reason() string
|
|
||||||
Key() bool
|
|
||||||
Cause() error
|
|
||||||
ErrorName() string
|
|
||||||
} = GetCDNRequestValidationError{}
|
|
||||||
|
|
||||||
// Validate checks the field values on UpdateCDNRequest with the rules defined
|
|
||||||
// in the proto definition for this message. If any rules are violated, an
|
|
||||||
// error is returned.
|
|
||||||
func (m *UpdateCDNRequest) Validate() error {
|
|
||||||
if m == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok {
|
|
||||||
return UpdateCDNRequestValidationError{
|
|
||||||
field: "SourceType",
|
|
||||||
reason: "value must be one of the defined enum values",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m._validateHostname(m.GetHostName()); err != nil {
|
|
||||||
return UpdateCDNRequestValidationError{
|
|
||||||
field: "HostName",
|
|
||||||
reason: "value must be a valid hostname",
|
|
||||||
cause: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.GetIdc() != "" {
|
|
||||||
|
|
||||||
if l := utf8.RuneCountInString(m.GetIdc()); l < 1 || l > 1024 {
|
|
||||||
return UpdateCDNRequestValidationError{
|
|
||||||
field: "Idc",
|
|
||||||
reason: "value length must be between 1 and 1024 runes, inclusive",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.GetLocation() != "" {
|
|
||||||
|
|
||||||
if utf8.RuneCountInString(m.GetLocation()) > 1024 {
|
|
||||||
return UpdateCDNRequestValidationError{
|
|
||||||
field: "Location",
|
|
||||||
reason: "value length must be at most 1024 runes",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if ip := net.ParseIP(m.GetIp()); ip == nil {
|
|
||||||
return UpdateCDNRequestValidationError{
|
|
||||||
field: "Ip",
|
|
||||||
reason: "value must be a valid IP address",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if val := m.GetPort(); val < 1024 || val >= 65535 {
|
|
||||||
return UpdateCDNRequestValidationError{
|
|
||||||
field: "Port",
|
|
||||||
reason: "value must be inside range [1024, 65535)",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if val := m.GetDownloadPort(); val < 1024 || val >= 65535 {
|
|
||||||
return UpdateCDNRequestValidationError{
|
|
||||||
field: "DownloadPort",
|
|
||||||
reason: "value must be inside range [1024, 65535)",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.GetCdnClusterId() < 1 {
|
|
||||||
return UpdateCDNRequestValidationError{
|
|
||||||
field: "CdnClusterId",
|
|
||||||
reason: "value must be greater than or equal to 1",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UpdateCDNRequest) _validateHostname(host string) error {
|
|
||||||
s := strings.ToLower(strings.TrimSuffix(host, "."))
|
|
||||||
|
|
||||||
if len(host) > 253 {
|
|
||||||
return errors.New("hostname cannot exceed 253 characters")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, part := range strings.Split(s, ".") {
|
|
||||||
if l := len(part); l == 0 || l > 63 {
|
|
||||||
return errors.New("hostname part must be non-empty and cannot exceed 63 characters")
|
|
||||||
}
|
|
||||||
|
|
||||||
if part[0] == '-' {
|
|
||||||
return errors.New("hostname parts cannot begin with hyphens")
|
|
||||||
}
|
|
||||||
|
|
||||||
if part[len(part)-1] == '-' {
|
|
||||||
return errors.New("hostname parts cannot end with hyphens")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, r := range part {
|
|
||||||
if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' {
|
|
||||||
return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateCDNRequestValidationError is the validation error returned by
|
|
||||||
// UpdateCDNRequest.Validate if the designated constraints aren't met.
|
|
||||||
type UpdateCDNRequestValidationError struct {
|
|
||||||
field string
|
|
||||||
reason string
|
|
||||||
cause error
|
|
||||||
key bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Field function returns field value.
|
|
||||||
func (e UpdateCDNRequestValidationError) Field() string { return e.field }
|
|
||||||
|
|
||||||
// Reason function returns reason value.
|
|
||||||
func (e UpdateCDNRequestValidationError) Reason() string { return e.reason }
|
|
||||||
|
|
||||||
// Cause function returns cause value.
|
|
||||||
func (e UpdateCDNRequestValidationError) Cause() error { return e.cause }
|
|
||||||
|
|
||||||
// Key function returns key value.
|
|
||||||
func (e UpdateCDNRequestValidationError) Key() bool { return e.key }
|
|
||||||
|
|
||||||
// ErrorName returns error name.
|
|
||||||
func (e UpdateCDNRequestValidationError) ErrorName() string { return "UpdateCDNRequestValidationError" }
|
|
||||||
|
|
||||||
// Error satisfies the builtin error interface
|
|
||||||
func (e UpdateCDNRequestValidationError) Error() string {
|
|
||||||
cause := ""
|
|
||||||
if e.cause != nil {
|
|
||||||
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
|
||||||
}
|
|
||||||
|
|
||||||
key := ""
|
|
||||||
if e.key {
|
|
||||||
key = "key for "
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"invalid %sUpdateCDNRequest.%s: %s%s",
|
|
||||||
key,
|
|
||||||
e.field,
|
|
||||||
e.reason,
|
|
||||||
cause)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ error = UpdateCDNRequestValidationError{}
|
|
||||||
|
|
||||||
var _ interface {
|
|
||||||
Field() string
|
|
||||||
Reason() string
|
|
||||||
Key() bool
|
|
||||||
Cause() error
|
|
||||||
ErrorName() string
|
|
||||||
} = UpdateCDNRequestValidationError{}
|
|
||||||
|
|
||||||
// Validate checks the field values on SeedPeerCluster with the rules defined
|
// Validate checks the field values on SeedPeerCluster with the rules defined
|
||||||
// in the proto definition for this message. If any rules are violated, an
|
// in the proto definition for this message. If any rules are violated, an
|
||||||
// error is returned.
|
// error is returned.
|
||||||
|
|
@ -672,6 +206,8 @@ func (m *SeedPeer) Validate() error {
|
||||||
|
|
||||||
// no validation rules for Type
|
// no validation rules for Type
|
||||||
|
|
||||||
|
// no validation rules for IsCdn
|
||||||
|
|
||||||
// no validation rules for Idc
|
// no validation rules for Idc
|
||||||
|
|
||||||
// no validation rules for NetTopology
|
// no validation rules for NetTopology
|
||||||
|
|
@ -912,13 +448,15 @@ func (m *UpdateSeedPeerRequest) Validate() error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if l := utf8.RuneCountInString(m.GetType()); l < 1 || l > 1024 {
|
if _, ok := _UpdateSeedPeerRequest_Type_InLookup[m.GetType()]; !ok {
|
||||||
return UpdateSeedPeerRequestValidationError{
|
return UpdateSeedPeerRequestValidationError{
|
||||||
field: "Type",
|
field: "Type",
|
||||||
reason: "value length must be between 1 and 1024 runes, inclusive",
|
reason: "value must be in list [super strong weak]",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// no validation rules for IsCdn
|
||||||
|
|
||||||
if m.GetIdc() != "" {
|
if m.GetIdc() != "" {
|
||||||
|
|
||||||
if l := utf8.RuneCountInString(m.GetIdc()); l < 1 || l > 1024 {
|
if l := utf8.RuneCountInString(m.GetIdc()); l < 1 || l > 1024 {
|
||||||
|
|
@ -1069,6 +607,12 @@ var _ interface {
|
||||||
ErrorName() string
|
ErrorName() string
|
||||||
} = UpdateSeedPeerRequestValidationError{}
|
} = UpdateSeedPeerRequestValidationError{}
|
||||||
|
|
||||||
|
var _UpdateSeedPeerRequest_Type_InLookup = map[string]struct{}{
|
||||||
|
"super": {},
|
||||||
|
"strong": {},
|
||||||
|
"weak": {},
|
||||||
|
}
|
||||||
|
|
||||||
// Validate checks the field values on SchedulerCluster with the rules defined
|
// Validate checks the field values on SchedulerCluster with the rules defined
|
||||||
// in the proto definition for this message. If any rules are violated, an
|
// in the proto definition for this message. If any rules are violated, an
|
||||||
// error is returned.
|
// error is returned.
|
||||||
|
|
@ -1193,21 +737,6 @@ func (m *Scheduler) Validate() error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for idx, item := range m.GetCdns() {
|
|
||||||
_, _ = idx, item
|
|
||||||
|
|
||||||
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
|
|
||||||
if err := v.Validate(); err != nil {
|
|
||||||
return SchedulerValidationError{
|
|
||||||
field: fmt.Sprintf("Cdns[%v]", idx),
|
|
||||||
reason: "embedded message failed validation",
|
|
||||||
cause: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
for idx, item := range m.GetSeedPeers() {
|
for idx, item := range m.GetSeedPeers() {
|
||||||
_, _ = idx, item
|
_, _ = idx, item
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -27,12 +27,10 @@ option go_package = "d7y.io/dragonfly/v2/pkg/rpc/manager";
|
||||||
enum SourceType {
|
enum SourceType {
|
||||||
// Scheduler service.
|
// Scheduler service.
|
||||||
SCHEDULER_SOURCE = 0;
|
SCHEDULER_SOURCE = 0;
|
||||||
// Dfdaemon service.
|
// Peer service.
|
||||||
CLIENT_SOURCE = 1;
|
PEER_SOURCE = 1;
|
||||||
// Deprecated: Use SuperSeed type of SeedPeer instead.
|
|
||||||
CDN_SOURCE = 2;
|
|
||||||
// SeedPeer service.
|
// SeedPeer service.
|
||||||
SEED_PEER_SOURCE = 3;
|
SEED_PEER_SOURCE = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
// SecurityGroup represents security group of cluster.
|
// SecurityGroup represents security group of cluster.
|
||||||
|
|
@ -49,76 +47,6 @@ message SecurityGroup {
|
||||||
string proxy_domain = 5;
|
string proxy_domain = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: Use SeedPeerCluster instead.
|
|
||||||
message CDNCluster {
|
|
||||||
// Cluster id.
|
|
||||||
uint64 id = 1;
|
|
||||||
// Cluster name.
|
|
||||||
string name = 2;
|
|
||||||
// Cluster biography.
|
|
||||||
string bio = 3;
|
|
||||||
// Cluster configuration.
|
|
||||||
bytes config = 4;
|
|
||||||
// Security group to which the cdn cluster belongs.
|
|
||||||
SecurityGroup security_group = 6;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use SuperSeed type of SeedPeer instead.
|
|
||||||
message CDN {
|
|
||||||
// CDN id.
|
|
||||||
uint64 id = 1;
|
|
||||||
// CDN hostname.
|
|
||||||
string host_name = 2;
|
|
||||||
// CDN idc.
|
|
||||||
string idc = 3;
|
|
||||||
// CDN location.
|
|
||||||
string location = 4;
|
|
||||||
// CDN ip.
|
|
||||||
string ip = 5;
|
|
||||||
// CDN grpc port.
|
|
||||||
int32 port = 6;
|
|
||||||
// CDN download port.
|
|
||||||
int32 download_port = 7;
|
|
||||||
// CDN state.
|
|
||||||
string state = 8;
|
|
||||||
// ID of the cluster to which the cdn belongs.
|
|
||||||
uint64 cdn_cluster_id = 9;
|
|
||||||
// Cluster to which the cdn belongs.
|
|
||||||
CDNCluster cdn_cluster = 10;
|
|
||||||
// Schedulers included in cdn.
|
|
||||||
repeated Scheduler schedulers = 11;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use GetSeedPeerRequest instead.
|
|
||||||
message GetCDNRequest {
|
|
||||||
// Request source type.
|
|
||||||
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
|
|
||||||
// CDN hostname.
|
|
||||||
string host_name = 2 [(validate.rules).string.hostname = true];
|
|
||||||
// ID of the cluster to which the cdn belongs.
|
|
||||||
uint64 cdn_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use UpdateSeedPeerRequest instead.
|
|
||||||
message UpdateCDNRequest {
|
|
||||||
// Request source type.
|
|
||||||
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
|
|
||||||
// CDN hostname.
|
|
||||||
string host_name = 2 [(validate.rules).string.hostname = true];
|
|
||||||
// CDN idc.
|
|
||||||
string idc = 3 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
|
|
||||||
// CDN location.
|
|
||||||
string location = 4 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
|
|
||||||
// CDN ip.
|
|
||||||
string ip = 5 [(validate.rules).string = {ip: true}];
|
|
||||||
// CDN grpc port.
|
|
||||||
int32 port = 6 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
|
|
||||||
// CDN download port.
|
|
||||||
int32 download_port = 7 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
|
|
||||||
// ID of the cluster to which the cdn belongs.
|
|
||||||
uint64 cdn_cluster_id = 8 [(validate.rules).uint64 = {gte: 1}];
|
|
||||||
}
|
|
||||||
|
|
||||||
// SeedPeerCluster represents cluster of seed peer.
|
// SeedPeerCluster represents cluster of seed peer.
|
||||||
message SeedPeerCluster {
|
message SeedPeerCluster {
|
||||||
// Cluster id.
|
// Cluster id.
|
||||||
|
|
@ -143,26 +71,28 @@ message SeedPeer {
|
||||||
string host_name = 2;
|
string host_name = 2;
|
||||||
// Seed peer type.
|
// Seed peer type.
|
||||||
string type = 3;
|
string type = 3;
|
||||||
|
// CDN seed peer.
|
||||||
|
bool is_cdn = 4;
|
||||||
// Seed peer idc.
|
// Seed peer idc.
|
||||||
string idc = 4;
|
string idc = 5;
|
||||||
// Seed peer network topology.
|
// Seed peer network topology.
|
||||||
string net_topology = 5;
|
string net_topology = 6;
|
||||||
// Seed peer location.
|
// Seed peer location.
|
||||||
string location = 6;
|
string location = 7;
|
||||||
// Seed peer ip.
|
// Seed peer ip.
|
||||||
string ip = 7;
|
string ip = 8;
|
||||||
// Seed peer grpc port.
|
// Seed peer grpc port.
|
||||||
int32 port = 8;
|
int32 port = 9;
|
||||||
// Seed peer download port.
|
// Seed peer download port.
|
||||||
int32 download_port = 9;
|
int32 download_port = 10;
|
||||||
// Seed peer state.
|
// Seed peer state.
|
||||||
string state = 10;
|
string state = 11;
|
||||||
// ID of the cluster to which the seed peer belongs.
|
// ID of the cluster to which the seed peer belongs.
|
||||||
uint64 seed_peer_cluster_id = 11;
|
uint64 seed_peer_cluster_id = 12;
|
||||||
// Cluster to which the seed peer belongs.
|
// Cluster to which the seed peer belongs.
|
||||||
SeedPeerCluster seed_peer_cluster = 12;
|
SeedPeerCluster seed_peer_cluster = 13;
|
||||||
// Schedulers included in seed peer.
|
// Schedulers included in seed peer.
|
||||||
repeated Scheduler schedulers = 13;
|
repeated Scheduler schedulers = 14;
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSeedPeerRequest represents request of GetSeedPeer.
|
// GetSeedPeerRequest represents request of GetSeedPeer.
|
||||||
|
|
@ -182,21 +112,23 @@ message UpdateSeedPeerRequest {
|
||||||
// Seed peer hostname.
|
// Seed peer hostname.
|
||||||
string host_name = 2 [(validate.rules).string.hostname = true];
|
string host_name = 2 [(validate.rules).string.hostname = true];
|
||||||
// Seed peer type.
|
// Seed peer type.
|
||||||
string type = 3 [(validate.rules).string = {min_len: 1, max_len: 1024}];
|
string type = 3 [(validate.rules).string = {in: ["super", "strong", "weak"]}];
|
||||||
|
// CDN seed peer.
|
||||||
|
bool is_cdn = 4;
|
||||||
// Seed peer idc.
|
// Seed peer idc.
|
||||||
string idc = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
|
string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
|
||||||
// Seed peer network topology.
|
// Seed peer network topology.
|
||||||
string net_topology = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
|
string net_topology = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
|
||||||
// Seed peer location.
|
// Seed peer location.
|
||||||
string location = 6 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
|
string location = 7 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
|
||||||
// Seed peer ip.
|
// Seed peer ip.
|
||||||
string ip = 7 [(validate.rules).string = {ip: true}];
|
string ip = 8 [(validate.rules).string = {ip: true}];
|
||||||
// Seed peer port.
|
// Seed peer port.
|
||||||
int32 port = 8 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
|
int32 port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
|
||||||
// Seed peer download port.
|
// Seed peer download port.
|
||||||
int32 download_port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
|
int32 download_port = 10 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
|
||||||
// ID of the cluster to which the seed peer belongs.
|
// ID of the cluster to which the seed peer belongs.
|
||||||
uint64 seed_peer_cluster_id = 10 [(validate.rules).uint64 = {gte: 1}];
|
uint64 seed_peer_cluster_id = 11 [(validate.rules).uint64 = {gte: 1}];
|
||||||
}
|
}
|
||||||
|
|
||||||
// SeedPeerCluster represents cluster of scheduler.
|
// SeedPeerCluster represents cluster of scheduler.
|
||||||
|
|
@ -241,8 +173,6 @@ message Scheduler {
|
||||||
uint64 scheduler_cluster_id = 10;
|
uint64 scheduler_cluster_id = 10;
|
||||||
// Cluster to which the scheduler belongs.
|
// Cluster to which the scheduler belongs.
|
||||||
SchedulerCluster scheduler_cluster = 11;
|
SchedulerCluster scheduler_cluster = 11;
|
||||||
// Deprecated: Use seed_peers instead.
|
|
||||||
repeated CDN cdns = 12;
|
|
||||||
// Seed peers to which the scheduler belongs.
|
// Seed peers to which the scheduler belongs.
|
||||||
repeated SeedPeer seed_peers = 13;
|
repeated SeedPeer seed_peers = 13;
|
||||||
// Scheduler network topology.
|
// Scheduler network topology.
|
||||||
|
|
@ -317,10 +247,6 @@ service Manager {
|
||||||
rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer);
|
rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer);
|
||||||
// Update SeedPeer configuration.
|
// Update SeedPeer configuration.
|
||||||
rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer);
|
rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer);
|
||||||
// Deprecated: Use GetSeedPeer instead.
|
|
||||||
rpc GetCDN(GetCDNRequest) returns(CDN);
|
|
||||||
// Deprecated: Use UpdateSeedPeer instead.
|
|
||||||
rpc UpdateCDN(UpdateCDNRequest) returns(CDN);
|
|
||||||
// Get Scheduler and Scheduler cluster configuration.
|
// Get Scheduler and Scheduler cluster configuration.
|
||||||
rpc GetScheduler(GetSchedulerRequest)returns(Scheduler);
|
rpc GetScheduler(GetSchedulerRequest)returns(Scheduler);
|
||||||
// Update scheduler configuration.
|
// Update scheduler configuration.
|
||||||
|
|
|
||||||
|
|
@ -38,26 +38,6 @@ func (m *MockManagerClient) EXPECT() *MockManagerClientMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetCDN mocks base method.
|
|
||||||
func (m *MockManagerClient) GetCDN(ctx context.Context, in *manager.GetCDNRequest, opts ...grpc.CallOption) (*manager.CDN, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
varargs := []interface{}{ctx, in}
|
|
||||||
for _, a := range opts {
|
|
||||||
varargs = append(varargs, a)
|
|
||||||
}
|
|
||||||
ret := m.ctrl.Call(m, "GetCDN", varargs...)
|
|
||||||
ret0, _ := ret[0].(*manager.CDN)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCDN indicates an expected call of GetCDN.
|
|
||||||
func (mr *MockManagerClientMockRecorder) GetCDN(ctx, in interface{}, opts ...interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
varargs := append([]interface{}{ctx, in}, opts...)
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCDN", reflect.TypeOf((*MockManagerClient)(nil).GetCDN), varargs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetScheduler mocks base method.
|
// GetScheduler mocks base method.
|
||||||
func (m *MockManagerClient) GetScheduler(ctx context.Context, in *manager.GetSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
|
func (m *MockManagerClient) GetScheduler(ctx context.Context, in *manager.GetSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
|
|
@ -138,26 +118,6 @@ func (mr *MockManagerClientMockRecorder) ListSchedulers(ctx, in interface{}, opt
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerClient)(nil).ListSchedulers), varargs...)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerClient)(nil).ListSchedulers), varargs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateCDN mocks base method.
|
|
||||||
func (m *MockManagerClient) UpdateCDN(ctx context.Context, in *manager.UpdateCDNRequest, opts ...grpc.CallOption) (*manager.CDN, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
varargs := []interface{}{ctx, in}
|
|
||||||
for _, a := range opts {
|
|
||||||
varargs = append(varargs, a)
|
|
||||||
}
|
|
||||||
ret := m.ctrl.Call(m, "UpdateCDN", varargs...)
|
|
||||||
ret0, _ := ret[0].(*manager.CDN)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateCDN indicates an expected call of UpdateCDN.
|
|
||||||
func (mr *MockManagerClientMockRecorder) UpdateCDN(ctx, in interface{}, opts ...interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
varargs := append([]interface{}{ctx, in}, opts...)
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockManagerClient)(nil).UpdateCDN), varargs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateScheduler mocks base method.
|
// UpdateScheduler mocks base method.
|
||||||
func (m *MockManagerClient) UpdateScheduler(ctx context.Context, in *manager.UpdateSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
|
func (m *MockManagerClient) UpdateScheduler(ctx context.Context, in *manager.UpdateSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
|
|
@ -358,21 +318,6 @@ func (m *MockManagerServer) EXPECT() *MockManagerServerMockRecorder {
|
||||||
return m.recorder
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetCDN mocks base method.
|
|
||||||
func (m *MockManagerServer) GetCDN(arg0 context.Context, arg1 *manager.GetCDNRequest) (*manager.CDN, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "GetCDN", arg0, arg1)
|
|
||||||
ret0, _ := ret[0].(*manager.CDN)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCDN indicates an expected call of GetCDN.
|
|
||||||
func (mr *MockManagerServerMockRecorder) GetCDN(arg0, arg1 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCDN", reflect.TypeOf((*MockManagerServer)(nil).GetCDN), arg0, arg1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetScheduler mocks base method.
|
// GetScheduler mocks base method.
|
||||||
func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
|
func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
|
|
@ -432,21 +377,6 @@ func (mr *MockManagerServerMockRecorder) ListSchedulers(arg0, arg1 interface{})
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerServer)(nil).ListSchedulers), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerServer)(nil).ListSchedulers), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateCDN mocks base method.
|
|
||||||
func (m *MockManagerServer) UpdateCDN(arg0 context.Context, arg1 *manager.UpdateCDNRequest) (*manager.CDN, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "UpdateCDN", arg0, arg1)
|
|
||||||
ret0, _ := ret[0].(*manager.CDN)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateCDN indicates an expected call of UpdateCDN.
|
|
||||||
func (mr *MockManagerServerMockRecorder) UpdateCDN(arg0, arg1 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockManagerServer)(nil).UpdateCDN), arg0, arg1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateScheduler mocks base method.
|
// UpdateScheduler mocks base method.
|
||||||
func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
|
func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
|
|
|
||||||
|
|
@ -369,8 +369,8 @@ type PeerHost struct {
|
||||||
sizeCache protoimpl.SizeCache
|
sizeCache protoimpl.SizeCache
|
||||||
unknownFields protoimpl.UnknownFields
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
// each time the daemon starts, it will generate a different uuid
|
// each time the daemon starts, it will generate a different id
|
||||||
Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
|
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||||
// peer host ip
|
// peer host ip
|
||||||
Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"`
|
Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"`
|
||||||
// rpc service port for peer
|
// rpc service port for peer
|
||||||
|
|
@ -421,9 +421,9 @@ func (*PeerHost) Descriptor() ([]byte, []int) {
|
||||||
return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{3}
|
return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{3}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *PeerHost) GetUuid() string {
|
func (x *PeerHost) GetId() string {
|
||||||
if x != nil {
|
if x != nil {
|
||||||
return x.Uuid
|
return x.Id
|
||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
@ -1262,174 +1262,174 @@ var file_pkg_rpc_scheduler_scheduler_proto_rawDesc = []byte{
|
||||||
0x02, 0x10, 0x01, 0x52, 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x2e, 0x0a, 0x0a,
|
0x02, 0x10, 0x01, 0x52, 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x2e, 0x0a, 0x0a,
|
||||||
0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
|
0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
|
||||||
0x32, 0x0f, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66,
|
0x32, 0x0f, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66,
|
||||||
0x6f, 0x52, 0x09, 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xb5, 0x02, 0x0a,
|
0x6f, 0x52, 0x09, 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xb0, 0x02, 0x0a,
|
||||||
0x08, 0x50, 0x65, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x75, 0x75, 0x69,
|
0x08, 0x50, 0x65, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18,
|
||||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0xb0, 0x01,
|
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02,
|
||||||
0x01, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20,
|
0x69, 0x64, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07,
|
||||||
0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70,
|
0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70, 0x12, 0x27, 0x0a, 0x08, 0x72,
|
||||||
0x12, 0x27, 0x0a, 0x08, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01,
|
0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa,
|
||||||
0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08,
|
0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x07, 0x72, 0x70, 0x63,
|
||||||
0x52, 0x07, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x09, 0x64, 0x6f, 0x77,
|
0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x09, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x70, 0x6f, 0x72,
|
||||||
0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42,
|
0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff,
|
||||||
0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x08, 0x64, 0x6f, 0x77, 0x6e,
|
0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x08, 0x64, 0x6f, 0x77, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12,
|
||||||
0x50, 0x6f, 0x72, 0x74, 0x12, 0x24, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d,
|
0x24, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01,
|
||||||
0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x68, 0x01,
|
0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x68, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73,
|
||||||
0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65,
|
0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
|
||||||
0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20,
|
0x79, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e,
|
||||||
0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x6f, 0x6d,
|
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a,
|
||||||
0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
|
0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
|
||||||
0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
|
0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64,
|
||||||
0x10, 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64,
|
0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63, 0x12, 0x21, 0x0a, 0x0c,
|
||||||
0x63, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67,
|
0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x09, 0x20, 0x01,
|
||||||
0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f,
|
0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x22,
|
||||||
0x6c, 0x6f, 0x67, 0x79, 0x22, 0xe2, 0x02, 0x0a, 0x0b, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65,
|
0xe2, 0x02, 0x0a, 0x0b, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12,
|
||||||
|
0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||||
|
0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49,
|
||||||
|
0x64, 0x12, 0x20, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
|
||||||
|
0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x72, 0x63,
|
||||||
|
0x50, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x03,
|
||||||
|
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x73, 0x74, 0x50, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x0a,
|
||||||
|
0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
|
||||||
|
0x32, 0x0f, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66,
|
||||||
|
0x6f, 0x52, 0x09, 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a,
|
||||||
|
0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04,
|
||||||
|
0x52, 0x09, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65,
|
||||||
|
0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x65,
|
||||||
|
0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
|
||||||
|
0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73,
|
||||||
|
0x12, 0x1e, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a,
|
||||||
|
0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65,
|
||||||
|
0x12, 0x2b, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20,
|
||||||
|
0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x4c,
|
||||||
|
0x6f, 0x61, 0x64, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a,
|
||||||
|
0x0e, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
|
||||||
|
0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x43,
|
||||||
|
0x6f, 0x75, 0x6e, 0x74, 0x22, 0x8e, 0x03, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x50, 0x61, 0x63,
|
||||||
|
0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02,
|
||||||
|
0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74,
|
||||||
|
0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x69, 0x64,
|
||||||
|
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52,
|
||||||
|
0x06, 0x73, 0x72, 0x63, 0x50, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6c,
|
||||||
|
0x6c, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42,
|
||||||
|
0x07, 0xfa, 0x42, 0x04, 0x1a, 0x02, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c,
|
||||||
|
0x65, 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x09, 0x6d, 0x61, 0x69, 0x6e, 0x5f,
|
||||||
|
0x70, 0x65, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x63, 0x68,
|
||||||
|
0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x65,
|
||||||
|
0x74, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x08, 0x6d, 0x61, 0x69, 0x6e,
|
||||||
|
0x50, 0x65, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0b, 0x73, 0x74, 0x65, 0x61, 0x6c, 0x5f, 0x70, 0x65,
|
||||||
|
0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x63, 0x68, 0x65,
|
||||||
|
0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74,
|
||||||
|
0x2e, 0x44, 0x65, 0x73, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x0a, 0x73, 0x74, 0x65, 0x61, 0x6c,
|
||||||
|
0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20,
|
||||||
|
0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52,
|
||||||
|
0x04, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0x6e, 0x0a, 0x08, 0x44, 0x65, 0x73, 0x74, 0x50, 0x65, 0x65,
|
||||||
|
0x72, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa,
|
||||||
|
0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70, 0x12, 0x27, 0x0a, 0x08, 0x72, 0x70,
|
||||||
|
0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42,
|
||||||
|
0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x07, 0x72, 0x70, 0x63, 0x50,
|
||||||
|
0x6f, 0x72, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03,
|
||||||
|
0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x70,
|
||||||
|
0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0xa6, 0x03, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65,
|
||||||
0x73, 0x75, 0x6c, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18,
|
0x73, 0x75, 0x6c, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18,
|
||||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06,
|
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06,
|
||||||
0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x69,
|
0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69,
|
||||||
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01,
|
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01,
|
||||||
0x52, 0x06, 0x73, 0x72, 0x63, 0x50, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x5f,
|
0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x06, 0x73, 0x72, 0x63, 0x5f,
|
||||||
0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x73, 0x74, 0x50, 0x69,
|
0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70,
|
||||||
0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18,
|
0x01, 0x52, 0x05, 0x73, 0x72, 0x63, 0x49, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x75,
|
||||||
0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65,
|
0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||||
0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66,
|
0x09, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x6f, 0x6d, 0x61, 0x69,
|
||||||
0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
|
0x6e, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
|
||||||
0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65,
|
0x69, 0x64, 0x63, 0x12, 0x1a, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
|
||||||
0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01,
|
0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, 0x01, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12,
|
||||||
0x28, 0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73,
|
0x37, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74,
|
||||||
0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75,
|
0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x22, 0x0b, 0x28, 0xff,
|
||||||
0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x08, 0x20,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65,
|
||||||
0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52,
|
0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x66,
|
||||||
0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6c, 0x6f,
|
0x66, 0x69, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x72, 0x61, 0x66, 0x66,
|
||||||
0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e,
|
0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d,
|
||||||
0x48, 0x6f, 0x73, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4c, 0x6f,
|
0x52, 0x04, 0x63, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
|
||||||
0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x63,
|
0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73,
|
||||||
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x69,
|
0x12, 0x1e, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a,
|
||||||
0x73, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x8e, 0x03, 0x0a, 0x0a, 0x50, 0x65,
|
0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65,
|
||||||
0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b,
|
0x12, 0x3c, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f,
|
||||||
0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
|
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x42, 0x10, 0xfa, 0x42, 0x0d,
|
||||||
0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x73, 0x72,
|
0x1a, 0x0b, 0x28, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x52, 0x0f, 0x74,
|
||||||
0x63, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04,
|
0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x50,
|
||||||
0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x72, 0x63, 0x50, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x0e,
|
0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x07,
|
||||||
0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
|
0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa,
|
||||||
0x20, 0x01, 0x28, 0x05, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x1a, 0x02, 0x28, 0x01, 0x52, 0x0d, 0x70,
|
0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20,
|
||||||
0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x09,
|
0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
|
||||||
0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64,
|
||||||
0x1e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72,
|
0x22, 0x33, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75,
|
||||||
0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52,
|
0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01,
|
||||||
0x08, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0b, 0x73, 0x74, 0x65,
|
0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74,
|
||||||
0x61, 0x6c, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e,
|
0x61, 0x73, 0x6b, 0x49, 0x64, 0x22, 0x94, 0x02, 0x0a, 0x04, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x17,
|
||||||
0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x50,
|
0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
|
||||||
0x61, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x0a,
|
0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
|
||||||
0x73, 0x74, 0x65, 0x61, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x0a, 0x04, 0x63, 0x6f,
|
0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x1a, 0x02, 0x28, 0x00, 0x52, 0x04,
|
||||||
0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e,
|
0x74, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f,
|
||||||
0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0x6e, 0x0a, 0x08, 0x44, 0x65,
|
0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42,
|
||||||
0x73, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01,
|
0x04, 0x22, 0x02, 0x28, 0x01, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65,
|
||||||
0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70, 0x12,
|
0x6e, 0x67, 0x74, 0x68, 0x12, 0x33, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69,
|
||||||
0x27, 0x0a, 0x08, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
|
0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42,
|
||||||
0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52,
|
0x07, 0xfa, 0x42, 0x04, 0x1a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50,
|
||||||
0x07, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72,
|
0x69, 0x65, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x05, 0x73, 0x74, 0x61,
|
||||||
0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
|
0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
|
||||||
0x10, 0x01, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0xa6, 0x03, 0x0a, 0x0a, 0x50,
|
0x01, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72,
|
||||||
0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73,
|
0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x42, 0x07, 0xfa, 0x42,
|
||||||
0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
|
0x04, 0x1a, 0x02, 0x28, 0x00, 0x52, 0x09, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74,
|
||||||
0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70,
|
0x12, 0x2a, 0x0a, 0x10, 0x68, 0x61, 0x73, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65,
|
||||||
0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42,
|
0x50, 0x65, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x68, 0x61, 0x73, 0x41,
|
||||||
0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a,
|
0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x65, 0x65, 0x72, 0x22, 0xf8, 0x01, 0x0a,
|
||||||
0x06, 0x73, 0x72, 0x63, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa,
|
0x13, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71,
|
||||||
0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x05, 0x73, 0x72, 0x63, 0x49, 0x70, 0x12, 0x27, 0x0a,
|
0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18,
|
||||||
0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
|
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06,
|
||||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
|
0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x18, 0x02, 0x20,
|
||||||
0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20,
|
0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x63, 0x69,
|
||||||
0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63, 0x12, 0x1a, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18,
|
0x64, 0x12, 0x32, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20,
|
||||||
0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, 0x01, 0x01, 0x52,
|
0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65,
|
||||||
0x03, 0x75, 0x72, 0x6c, 0x12, 0x37, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f,
|
0x74, 0x61, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x75, 0x72,
|
||||||
0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x10, 0xfa, 0x42,
|
0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x30, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x68, 0x6f,
|
||||||
0x0d, 0x22, 0x0b, 0x28, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x52, 0x0d,
|
0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64,
|
||||||
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a,
|
0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x08, 0x70,
|
||||||
0x07, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
|
0x65, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70, 0x69, 0x65, 0x63, 0x65,
|
||||||
0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x73, 0x74, 0x18,
|
0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e,
|
||||||
0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73,
|
0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74,
|
||||||
0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75,
|
0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x70, 0x69, 0x65, 0x63,
|
||||||
0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x0b, 0x20,
|
0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x2a, 0x27, 0x0a, 0x07, 0x50, 0x61, 0x74, 0x74, 0x65,
|
||||||
0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52,
|
0x72, 0x6e, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x32, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x43,
|
||||||
0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x3c, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70,
|
0x44, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02,
|
||||||
0x69, 0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05,
|
0x32, 0x9e, 0x03, 0x0a, 0x09, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x49,
|
||||||
0x42, 0x10, 0xfa, 0x42, 0x0d, 0x1a, 0x0b, 0x28, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61,
|
||||||
0xff, 0x01, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43, 0x6f,
|
0x73, 0x6b, 0x12, 0x1a, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50,
|
||||||
0x75, 0x6e, 0x74, 0x22, 0x50, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65,
|
0x65, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19,
|
||||||
0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
|
0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73,
|
||||||
0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73,
|
0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x70,
|
||||||
0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02,
|
0x6f, 0x72, 0x74, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x16,
|
||||||
0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x70,
|
0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65,
|
||||||
0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0x33, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x54, 0x61, 0x73,
|
0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
|
||||||
0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b,
|
0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x28, 0x01, 0x30,
|
||||||
0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
|
0x01, 0x12, 0x41, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52,
|
||||||
0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x22, 0x94, 0x02, 0x0a, 0x04, 0x54,
|
0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
|
||||||
0x61, 0x73, 0x6b, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
|
0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x16, 0x2e, 0x67,
|
||||||
0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x04,
|
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
|
||||||
0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x1a,
|
0x6d, 0x70, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x09, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x54, 0x61, 0x73,
|
||||||
0x02, 0x28, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x63, 0x6f, 0x6e,
|
0x6b, 0x12, 0x15, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65,
|
||||||
0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28,
|
0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||||
0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x28, 0x01, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74,
|
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
|
||||||
0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x33, 0x0a, 0x11, 0x74, 0x6f, 0x74,
|
0x12, 0x37, 0x0a, 0x08, 0x53, 0x74, 0x61, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1a, 0x2e, 0x73,
|
||||||
0x61, 0x6c, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
|
0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x54, 0x61, 0x73,
|
||||||
0x20, 0x01, 0x28, 0x05, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x1a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x74,
|
0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64,
|
||||||
0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d,
|
0x75, 0x6c, 0x65, 0x72, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x46, 0x0a, 0x0c, 0x41, 0x6e, 0x6e,
|
||||||
0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa,
|
0x6f, 0x75, 0x6e, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1e, 0x2e, 0x73, 0x63, 0x68, 0x65,
|
||||||
0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a,
|
0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x54, 0x61,
|
||||||
0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28,
|
0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||||
0x05, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x1a, 0x02, 0x28, 0x00, 0x52, 0x09, 0x70, 0x65, 0x65, 0x72,
|
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
|
||||||
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x68, 0x61, 0x73, 0x41, 0x76, 0x61, 0x69,
|
0x79, 0x42, 0x27, 0x5a, 0x25, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x72, 0x61, 0x67,
|
||||||
0x6c, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x65, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52,
|
0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63,
|
||||||
0x10, 0x68, 0x61, 0x73, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x65, 0x65,
|
0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||||
0x72, 0x22, 0xf8, 0x01, 0x0a, 0x13, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x54, 0x61,
|
0x6f, 0x33,
|
||||||
0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73,
|
|
||||||
0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
|
|
||||||
0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x03, 0x63,
|
|
||||||
0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
|
|
||||||
0x01, 0x52, 0x03, 0x63, 0x69, 0x64, 0x12, 0x32, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65,
|
|
||||||
0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e,
|
|
||||||
0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10,
|
|
||||||
0x01, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x30, 0x0a, 0x09, 0x70, 0x65,
|
|
||||||
0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e,
|
|
||||||
0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x6f,
|
|
||||||
0x73, 0x74, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0c,
|
|
||||||
0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01,
|
|
||||||
0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50,
|
|
||||||
0x61, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52,
|
|
||||||
0x0b, 0x70, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x2a, 0x27, 0x0a, 0x07,
|
|
||||||
0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x32, 0x50, 0x10, 0x00,
|
|
||||||
0x12, 0x07, 0x0a, 0x03, 0x43, 0x44, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x4f, 0x55,
|
|
||||||
0x52, 0x43, 0x45, 0x10, 0x02, 0x32, 0x9e, 0x03, 0x0a, 0x09, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
|
|
||||||
0x6c, 0x65, 0x72, 0x12, 0x49, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50,
|
|
||||||
0x65, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1a, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75,
|
|
||||||
0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75,
|
|
||||||
0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e,
|
|
||||||
0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x46,
|
|
||||||
0x0a, 0x11, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65, 0x73,
|
|
||||||
0x75, 0x6c, 0x74, 0x12, 0x16, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e,
|
|
||||||
0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x63,
|
|
||||||
0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b,
|
|
||||||
0x65, 0x74, 0x28, 0x01, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74,
|
|
||||||
0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x63, 0x68,
|
|
||||||
0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c,
|
|
||||||
0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
|
||||||
0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x09, 0x4c, 0x65, 0x61,
|
|
||||||
0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x15, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
|
|
||||||
0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x1a, 0x16, 0x2e,
|
|
||||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
|
|
||||||
0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x37, 0x0a, 0x08, 0x53, 0x74, 0x61, 0x74, 0x54, 0x61, 0x73,
|
|
||||||
0x6b, 0x12, 0x1a, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x53, 0x74,
|
|
||||||
0x61, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e,
|
|
||||||
0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x46,
|
|
||||||
0x0a, 0x0c, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1e,
|
|
||||||
0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75,
|
|
||||||
0x6e, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
|
|
||||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
|
||||||
0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x27, 0x5a, 0x25, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f,
|
|
||||||
0x2f, 0x64, 0x72, 0x61, 0x67, 0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b,
|
|
||||||
0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x62,
|
|
||||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|
|
||||||
|
|
@ -43,9 +43,6 @@ var (
|
||||||
_ = base.Code(0)
|
_ = base.Code(0)
|
||||||
)
|
)
|
||||||
|
|
||||||
// define the regex for a UUID once up-front
|
|
||||||
var _scheduler_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
|
|
||||||
|
|
||||||
// Validate checks the field values on PeerTaskRequest with the rules defined
|
// Validate checks the field values on PeerTaskRequest with the rules defined
|
||||||
// in the proto definition for this message. If any rules are violated, an
|
// in the proto definition for this message. If any rules are violated, an
|
||||||
// error is returned.
|
// error is returned.
|
||||||
|
|
@ -366,11 +363,10 @@ func (m *PeerHost) Validate() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := m._validateUuid(m.GetUuid()); err != nil {
|
if utf8.RuneCountInString(m.GetId()) < 1 {
|
||||||
return PeerHostValidationError{
|
return PeerHostValidationError{
|
||||||
field: "Uuid",
|
field: "Id",
|
||||||
reason: "value must be a valid UUID",
|
reason: "value length must be at least 1 runes",
|
||||||
cause: err,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -444,14 +440,6 @@ func (m *PeerHost) _validateHostname(host string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *PeerHost) _validateUuid(uuid string) error {
|
|
||||||
if matched := _scheduler_uuidPattern.MatchString(uuid); !matched {
|
|
||||||
return errors.New("invalid uuid format")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeerHostValidationError is the validation error returned by
|
// PeerHostValidationError is the validation error returned by
|
||||||
// PeerHost.Validate if the designated constraints aren't met.
|
// PeerHost.Validate if the designated constraints aren't met.
|
||||||
type PeerHostValidationError struct {
|
type PeerHostValidationError struct {
|
||||||
|
|
|
||||||
|
|
@ -74,8 +74,8 @@ message SinglePiece{
|
||||||
}
|
}
|
||||||
|
|
||||||
message PeerHost{
|
message PeerHost{
|
||||||
// each time the daemon starts, it will generate a different uuid
|
// each time the daemon starts, it will generate a different id
|
||||||
string uuid = 1 [(validate.rules).string.uuid = true];
|
string id = 1 [(validate.rules).string.min_len = 1];
|
||||||
// peer host ip
|
// peer host ip
|
||||||
string ip = 2 [(validate.rules).string.ip = true];
|
string ip = 2 [(validate.rules).string.ip = true];
|
||||||
// rpc service port for peer
|
// rpc service port for peer
|
||||||
|
|
|
||||||
|
|
@ -84,7 +84,7 @@ func New() *Config {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
DynConfig: &DynConfig{
|
DynConfig: &DynConfig{
|
||||||
RefreshInterval: 1 * time.Minute,
|
RefreshInterval: 10 * time.Second,
|
||||||
},
|
},
|
||||||
Host: &HostConfig{},
|
Host: &HostConfig{},
|
||||||
Manager: &ManagerConfig{
|
Manager: &ManagerConfig{
|
||||||
|
|
|
||||||
|
|
@ -141,7 +141,7 @@ func TestConfig_New(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
DynConfig: &DynConfig{
|
DynConfig: &DynConfig{
|
||||||
RefreshInterval: 1 * time.Minute,
|
RefreshInterval: 10 * time.Second,
|
||||||
},
|
},
|
||||||
Host: &HostConfig{},
|
Host: &HostConfig{},
|
||||||
Manager: &ManagerConfig{
|
Manager: &ManagerConfig{
|
||||||
|
|
|
||||||
|
|
@ -17,8 +17,8 @@
|
||||||
package config
|
package config
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// Default number of cdn load limit.
|
// Default number of seed peer load limit.
|
||||||
DefaultCDNLoadLimit = 300
|
DefaultSeedPeerLoadLimit = 300
|
||||||
|
|
||||||
// Default number of client load limit.
|
// Default number of client load limit.
|
||||||
DefaultClientLoadLimit = 50
|
DefaultClientLoadLimit = 50
|
||||||
|
|
|
||||||
|
|
@ -41,7 +41,6 @@ var (
|
||||||
|
|
||||||
type DynconfigData struct {
|
type DynconfigData struct {
|
||||||
SeedPeers []*SeedPeer `yaml:"seedPeers" mapstructure:"seedPeers" json:"seed_peers"`
|
SeedPeers []*SeedPeer `yaml:"seedPeers" mapstructure:"seedPeers" json:"seed_peers"`
|
||||||
CDNs []*CDN `yaml:"cdns" mapstructure:"cdns" json:"cdns"`
|
|
||||||
SchedulerCluster *SchedulerCluster `yaml:"schedulerCluster" mapstructure:"schedulerCluster" json:"scheduler_cluster"`
|
SchedulerCluster *SchedulerCluster `yaml:"schedulerCluster" mapstructure:"schedulerCluster" json:"scheduler_cluster"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -49,6 +48,7 @@ type SeedPeer struct {
|
||||||
ID uint `yaml:"id" mapstructure:"id" json:"id"`
|
ID uint `yaml:"id" mapstructure:"id" json:"id"`
|
||||||
Hostname string `yaml:"hostname" mapstructure:"hostname" json:"host_name"`
|
Hostname string `yaml:"hostname" mapstructure:"hostname" json:"host_name"`
|
||||||
Type string `yaml:"type" mapstructure:"type" json:"type"`
|
Type string `yaml:"type" mapstructure:"type" json:"type"`
|
||||||
|
IsCDN bool `yaml:"isCDN" mapstructure:"isCDN" json:"is_cdn"`
|
||||||
IDC string `yaml:"idc" mapstructure:"idc" json:"idc"`
|
IDC string `yaml:"idc" mapstructure:"idc" json:"idc"`
|
||||||
NetTopology string `yaml:"netTopology" mapstructure:"netTopology" json:"net_topology"`
|
NetTopology string `yaml:"netTopology" mapstructure:"netTopology" json:"net_topology"`
|
||||||
Location string `yaml:"location" mapstructure:"location" json:"location"`
|
Location string `yaml:"location" mapstructure:"location" json:"location"`
|
||||||
|
|
@ -75,34 +75,6 @@ type SeedPeerCluster struct {
|
||||||
Config []byte `yaml:"config" mapstructure:"config" json:"config"`
|
Config []byte `yaml:"config" mapstructure:"config" json:"config"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type CDN struct {
|
|
||||||
ID uint `yaml:"id" mapstructure:"id" json:"id"`
|
|
||||||
Hostname string `yaml:"hostname" mapstructure:"hostname" json:"host_name"`
|
|
||||||
IDC string `yaml:"idc" mapstructure:"idc" json:"idc"`
|
|
||||||
Location string `yaml:"location" mapstructure:"location" json:"location"`
|
|
||||||
IP string `yaml:"ip" mapstructure:"ip" json:"ip"`
|
|
||||||
Port int32 `yaml:"port" mapstructure:"port" json:"port"`
|
|
||||||
DownloadPort int32 `yaml:"downloadPort" mapstructure:"downloadPort" json:"download_port"`
|
|
||||||
CDNCluster *CDNCluster `yaml:"cdnCluster" mapstructure:"cdnCluster" json:"cdn_cluster"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CDN) GetCDNClusterConfig() (types.CDNClusterConfig, bool) {
|
|
||||||
if c.CDNCluster == nil {
|
|
||||||
return types.CDNClusterConfig{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
var config types.CDNClusterConfig
|
|
||||||
if err := json.Unmarshal(c.CDNCluster.Config, &config); err != nil {
|
|
||||||
return types.CDNClusterConfig{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
return config, true
|
|
||||||
}
|
|
||||||
|
|
||||||
type CDNCluster struct {
|
|
||||||
Config []byte `yaml:"config" mapstructure:"config" json:"config"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type SchedulerCluster struct {
|
type SchedulerCluster struct {
|
||||||
Config []byte `yaml:"config" mapstructure:"config" json:"config"`
|
Config []byte `yaml:"config" mapstructure:"config" json:"config"`
|
||||||
ClientConfig []byte `yaml:"clientConfig" mapstructure:"clientConfig" json:"client_config"`
|
ClientConfig []byte `yaml:"clientConfig" mapstructure:"clientConfig" json:"client_config"`
|
||||||
|
|
|
||||||
|
|
@ -62,14 +62,6 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) {
|
||||||
sleep: func() {},
|
sleep: func() {},
|
||||||
mock: func(m *mocks.MockClientMockRecorder) {
|
mock: func(m *mocks.MockClientMockRecorder) {
|
||||||
m.GetScheduler(gomock.Any()).Return(&manager.Scheduler{
|
m.GetScheduler(gomock.Any()).Return(&manager.Scheduler{
|
||||||
Cdns: []*manager.CDN{
|
|
||||||
{
|
|
||||||
HostName: "foo",
|
|
||||||
Ip: "127.0.0.1",
|
|
||||||
Port: 8001,
|
|
||||||
DownloadPort: 8003,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SeedPeers: []*manager.SeedPeer{
|
SeedPeers: []*manager.SeedPeer{
|
||||||
{
|
{
|
||||||
HostName: "bar",
|
HostName: "bar",
|
||||||
|
|
@ -82,10 +74,6 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) {
|
||||||
},
|
},
|
||||||
expect: func(t *testing.T, data *DynconfigData, err error) {
|
expect: func(t *testing.T, data *DynconfigData, err error) {
|
||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
assert.Equal(data.CDNs[0].Hostname, "foo")
|
|
||||||
assert.Equal(data.CDNs[0].IP, "127.0.0.1")
|
|
||||||
assert.Equal(data.CDNs[0].Port, int32(8001))
|
|
||||||
assert.Equal(data.CDNs[0].DownloadPort, int32(8003))
|
|
||||||
assert.Equal(data.SeedPeers[0].Hostname, "bar")
|
assert.Equal(data.SeedPeers[0].Hostname, "bar")
|
||||||
assert.Equal(data.SeedPeers[0].IP, "127.0.0.1")
|
assert.Equal(data.SeedPeers[0].IP, "127.0.0.1")
|
||||||
assert.Equal(data.SeedPeers[0].Port, int32(8001))
|
assert.Equal(data.SeedPeers[0].Port, int32(8001))
|
||||||
|
|
@ -106,14 +94,6 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) {
|
||||||
mock: func(m *mocks.MockClientMockRecorder) {
|
mock: func(m *mocks.MockClientMockRecorder) {
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
m.GetScheduler(gomock.Any()).Return(&manager.Scheduler{
|
m.GetScheduler(gomock.Any()).Return(&manager.Scheduler{
|
||||||
Cdns: []*manager.CDN{
|
|
||||||
{
|
|
||||||
HostName: "foo",
|
|
||||||
Ip: "127.0.0.1",
|
|
||||||
Port: 8001,
|
|
||||||
DownloadPort: 8003,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SeedPeers: []*manager.SeedPeer{
|
SeedPeers: []*manager.SeedPeer{
|
||||||
{
|
{
|
||||||
HostName: "bar",
|
HostName: "bar",
|
||||||
|
|
@ -128,10 +108,6 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) {
|
||||||
},
|
},
|
||||||
expect: func(t *testing.T, data *DynconfigData, err error) {
|
expect: func(t *testing.T, data *DynconfigData, err error) {
|
||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
assert.Equal(data.CDNs[0].Hostname, "foo")
|
|
||||||
assert.Equal(data.CDNs[0].IP, "127.0.0.1")
|
|
||||||
assert.Equal(data.CDNs[0].Port, int32(8001))
|
|
||||||
assert.Equal(data.CDNs[0].DownloadPort, int32(8003))
|
|
||||||
assert.Equal(data.SeedPeers[0].Hostname, "bar")
|
assert.Equal(data.SeedPeers[0].Hostname, "bar")
|
||||||
assert.Equal(data.SeedPeers[0].IP, "127.0.0.1")
|
assert.Equal(data.SeedPeers[0].IP, "127.0.0.1")
|
||||||
assert.Equal(data.SeedPeers[0].Port, int32(8001))
|
assert.Equal(data.SeedPeers[0].Port, int32(8001))
|
||||||
|
|
|
||||||
|
|
@ -133,7 +133,7 @@ var (
|
||||||
Subsystem: constants.SchedulerMetricsName,
|
Subsystem: constants.SchedulerMetricsName,
|
||||||
Name: "peer_host_traffic",
|
Name: "peer_host_traffic",
|
||||||
Help: "Counter of the number of per peer host traffic.",
|
Help: "Counter of the number of per peer host traffic.",
|
||||||
}, []string{"biz_tag", "traffic_type", "peer_host_uuid", "peer_host_ip"})
|
}, []string{"biz_tag", "traffic_type", "peer_host_id", "peer_host_ip"})
|
||||||
|
|
||||||
PeerTaskCounter = promauto.NewCounterVec(prometheus.CounterOpts{
|
PeerTaskCounter = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||||
Namespace: constants.MetricsNamespace,
|
Namespace: constants.MetricsNamespace,
|
||||||
|
|
|
||||||
|
|
@ -54,14 +54,6 @@ func WithUploadLoadLimit(limit int32) HostOption {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithIsCDN sets host's IsCDN.
|
|
||||||
func WithIsCDN(isCDN bool) HostOption {
|
|
||||||
return func(h *Host) *Host {
|
|
||||||
h.IsCDN = isCDN
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithHostType sets host's type.
|
// WithHostType sets host's type.
|
||||||
func WithHostType(hostType HostType) HostOption {
|
func WithHostType(hostType HostType) HostOption {
|
||||||
return func(h *Host) *Host {
|
return func(h *Host) *Host {
|
||||||
|
|
@ -115,9 +107,6 @@ type Host struct {
|
||||||
// PeerCount is peer count.
|
// PeerCount is peer count.
|
||||||
PeerCount *atomic.Int32
|
PeerCount *atomic.Int32
|
||||||
|
|
||||||
// IsCDN is used as tag cdn.
|
|
||||||
IsCDN bool
|
|
||||||
|
|
||||||
// CreateAt is host create time.
|
// CreateAt is host create time.
|
||||||
CreateAt *atomic.Time
|
CreateAt *atomic.Time
|
||||||
|
|
||||||
|
|
@ -131,7 +120,7 @@ type Host struct {
|
||||||
// New host instance.
|
// New host instance.
|
||||||
func NewHost(rawHost *scheduler.PeerHost, options ...HostOption) *Host {
|
func NewHost(rawHost *scheduler.PeerHost, options ...HostOption) *Host {
|
||||||
h := &Host{
|
h := &Host{
|
||||||
ID: rawHost.Uuid,
|
ID: rawHost.Id,
|
||||||
Type: HostTypeNormal,
|
Type: HostTypeNormal,
|
||||||
IP: rawHost.Ip,
|
IP: rawHost.Ip,
|
||||||
Hostname: rawHost.HostName,
|
Hostname: rawHost.HostName,
|
||||||
|
|
@ -145,10 +134,9 @@ func NewHost(rawHost *scheduler.PeerHost, options ...HostOption) *Host {
|
||||||
UploadPeerCount: atomic.NewInt32(0),
|
UploadPeerCount: atomic.NewInt32(0),
|
||||||
Peers: &sync.Map{},
|
Peers: &sync.Map{},
|
||||||
PeerCount: atomic.NewInt32(0),
|
PeerCount: atomic.NewInt32(0),
|
||||||
IsCDN: false,
|
|
||||||
CreateAt: atomic.NewTime(time.Now()),
|
CreateAt: atomic.NewTime(time.Now()),
|
||||||
UpdateAt: atomic.NewTime(time.Now()),
|
UpdateAt: atomic.NewTime(time.Now()),
|
||||||
Log: logger.WithHostID(rawHost.Uuid),
|
Log: logger.WithHostID(rawHost.Id),
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, opt := range options {
|
for _, opt := range options {
|
||||||
|
|
|
||||||
|
|
@ -28,7 +28,7 @@ import (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
mockRawHost = &scheduler.PeerHost{
|
mockRawHost = &scheduler.PeerHost{
|
||||||
Uuid: idgen.HostID("hostname", 8003),
|
Id: idgen.HostID("hostname", 8003),
|
||||||
Ip: "127.0.0.1",
|
Ip: "127.0.0.1",
|
||||||
RpcPort: 8003,
|
RpcPort: 8003,
|
||||||
DownPort: 8001,
|
DownPort: 8001,
|
||||||
|
|
@ -40,23 +40,11 @@ var (
|
||||||
}
|
}
|
||||||
|
|
||||||
mockRawSeedHost = &scheduler.PeerHost{
|
mockRawSeedHost = &scheduler.PeerHost{
|
||||||
Uuid: idgen.SeedHostID("hostname", 8003),
|
Id: idgen.HostID("hostname_seed", 8003),
|
||||||
Ip: "127.0.0.1",
|
Ip: "127.0.0.1",
|
||||||
RpcPort: 8003,
|
RpcPort: 8003,
|
||||||
DownPort: 8001,
|
DownPort: 8001,
|
||||||
HostName: "hostname",
|
HostName: "hostname_seed",
|
||||||
SecurityDomain: "security_domain",
|
|
||||||
Location: "location",
|
|
||||||
Idc: "idc",
|
|
||||||
NetTopology: "net_topology",
|
|
||||||
}
|
|
||||||
|
|
||||||
mockRawCDNHost = &scheduler.PeerHost{
|
|
||||||
Uuid: idgen.CDNHostID("hostname", 8003),
|
|
||||||
Ip: "127.0.0.1",
|
|
||||||
RpcPort: 8003,
|
|
||||||
DownPort: 8001,
|
|
||||||
HostName: "hostname",
|
|
||||||
SecurityDomain: "security_domain",
|
SecurityDomain: "security_domain",
|
||||||
Location: "location",
|
Location: "location",
|
||||||
Idc: "idc",
|
Idc: "idc",
|
||||||
|
|
@ -76,7 +64,7 @@ func TestHost_NewHost(t *testing.T) {
|
||||||
rawHost: mockRawHost,
|
rawHost: mockRawHost,
|
||||||
expect: func(t *testing.T, host *Host) {
|
expect: func(t *testing.T, host *Host) {
|
||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
assert.Equal(host.ID, mockRawHost.Uuid)
|
assert.Equal(host.ID, mockRawHost.Id)
|
||||||
assert.Equal(host.Type, HostTypeNormal)
|
assert.Equal(host.Type, HostTypeNormal)
|
||||||
assert.Equal(host.IP, mockRawHost.Ip)
|
assert.Equal(host.IP, mockRawHost.Ip)
|
||||||
assert.Equal(host.Port, mockRawHost.RpcPort)
|
assert.Equal(host.Port, mockRawHost.RpcPort)
|
||||||
|
|
@ -88,7 +76,6 @@ func TestHost_NewHost(t *testing.T) {
|
||||||
assert.Equal(host.NetTopology, mockRawHost.NetTopology)
|
assert.Equal(host.NetTopology, mockRawHost.NetTopology)
|
||||||
assert.Equal(host.UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
|
assert.Equal(host.UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
|
||||||
assert.Equal(host.PeerCount.Load(), int32(0))
|
assert.Equal(host.PeerCount.Load(), int32(0))
|
||||||
assert.Equal(host.IsCDN, false)
|
|
||||||
assert.NotEqual(host.CreateAt.Load(), 0)
|
assert.NotEqual(host.CreateAt.Load(), 0)
|
||||||
assert.NotEqual(host.UpdateAt.Load(), 0)
|
assert.NotEqual(host.UpdateAt.Load(), 0)
|
||||||
assert.NotNil(host.Log)
|
assert.NotNil(host.Log)
|
||||||
|
|
@ -97,10 +84,10 @@ func TestHost_NewHost(t *testing.T) {
|
||||||
{
|
{
|
||||||
name: "new seed host",
|
name: "new seed host",
|
||||||
rawHost: mockRawSeedHost,
|
rawHost: mockRawSeedHost,
|
||||||
options: []HostOption{WithHostType(HostTypeSuperSeed), WithIsCDN(true)},
|
options: []HostOption{WithHostType(HostTypeSuperSeed)},
|
||||||
expect: func(t *testing.T, host *Host) {
|
expect: func(t *testing.T, host *Host) {
|
||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
assert.Equal(host.ID, mockRawSeedHost.Uuid)
|
assert.Equal(host.ID, mockRawSeedHost.Id)
|
||||||
assert.Equal(host.Type, HostTypeSuperSeed)
|
assert.Equal(host.Type, HostTypeSuperSeed)
|
||||||
assert.Equal(host.IP, mockRawSeedHost.Ip)
|
assert.Equal(host.IP, mockRawSeedHost.Ip)
|
||||||
assert.Equal(host.Port, mockRawSeedHost.RpcPort)
|
assert.Equal(host.Port, mockRawSeedHost.RpcPort)
|
||||||
|
|
@ -112,7 +99,6 @@ func TestHost_NewHost(t *testing.T) {
|
||||||
assert.Equal(host.NetTopology, mockRawSeedHost.NetTopology)
|
assert.Equal(host.NetTopology, mockRawSeedHost.NetTopology)
|
||||||
assert.Equal(host.UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
|
assert.Equal(host.UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
|
||||||
assert.Equal(host.PeerCount.Load(), int32(0))
|
assert.Equal(host.PeerCount.Load(), int32(0))
|
||||||
assert.Equal(host.IsCDN, true)
|
|
||||||
assert.NotEqual(host.CreateAt.Load(), 0)
|
assert.NotEqual(host.CreateAt.Load(), 0)
|
||||||
assert.NotEqual(host.UpdateAt.Load(), 0)
|
assert.NotEqual(host.UpdateAt.Load(), 0)
|
||||||
assert.NotNil(host.Log)
|
assert.NotNil(host.Log)
|
||||||
|
|
@ -124,7 +110,7 @@ func TestHost_NewHost(t *testing.T) {
|
||||||
options: []HostOption{WithUploadLoadLimit(200)},
|
options: []HostOption{WithUploadLoadLimit(200)},
|
||||||
expect: func(t *testing.T, host *Host) {
|
expect: func(t *testing.T, host *Host) {
|
||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
assert.Equal(host.ID, mockRawHost.Uuid)
|
assert.Equal(host.ID, mockRawHost.Id)
|
||||||
assert.Equal(host.Type, HostTypeNormal)
|
assert.Equal(host.Type, HostTypeNormal)
|
||||||
assert.Equal(host.IP, mockRawHost.Ip)
|
assert.Equal(host.IP, mockRawHost.Ip)
|
||||||
assert.Equal(host.Port, mockRawHost.RpcPort)
|
assert.Equal(host.Port, mockRawHost.RpcPort)
|
||||||
|
|
@ -136,7 +122,6 @@ func TestHost_NewHost(t *testing.T) {
|
||||||
assert.Equal(host.NetTopology, mockRawHost.NetTopology)
|
assert.Equal(host.NetTopology, mockRawHost.NetTopology)
|
||||||
assert.Equal(host.UploadLoadLimit.Load(), int32(200))
|
assert.Equal(host.UploadLoadLimit.Load(), int32(200))
|
||||||
assert.Equal(host.PeerCount.Load(), int32(0))
|
assert.Equal(host.PeerCount.Load(), int32(0))
|
||||||
assert.Equal(host.IsCDN, false)
|
|
||||||
assert.NotEqual(host.CreateAt.Load(), 0)
|
assert.NotEqual(host.CreateAt.Load(), 0)
|
||||||
assert.NotEqual(host.UpdateAt.Load(), 0)
|
assert.NotEqual(host.UpdateAt.Load(), 0)
|
||||||
assert.NotNil(host.Log)
|
assert.NotNil(host.Log)
|
||||||
|
|
|
||||||
|
|
@ -118,11 +118,12 @@ func TestResource_New(t *testing.T) {
|
||||||
dynconfig.Get().Return(&config.DynconfigData{
|
dynconfig.Get().Return(&config.DynconfigData{
|
||||||
SeedPeers: []*config.SeedPeer{},
|
SeedPeers: []*config.SeedPeer{},
|
||||||
}, nil).Times(1),
|
}, nil).Times(1),
|
||||||
|
dynconfig.Register(gomock.Any()).Return().Times(1),
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
expect: func(t *testing.T, resource Resource, err error) {
|
expect: func(t *testing.T, resource Resource, err error) {
|
||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
assert.EqualError(err, "address list of cdn is empty")
|
assert.NoError(err)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
|
||||||
|
|
@ -137,10 +137,10 @@ func (s *seedPeer) initSeedPeer(task *Task, ps *cdnsystem.PieceSeed) (*Peer, err
|
||||||
task.Log.Infof("can not find seed peer: %s", ps.PeerId)
|
task.Log.Infof("can not find seed peer: %s", ps.PeerId)
|
||||||
|
|
||||||
// Load host from manager.
|
// Load host from manager.
|
||||||
host, ok := s.hostManager.Load(ps.HostUuid)
|
host, ok := s.hostManager.Load(ps.HostId)
|
||||||
if !ok {
|
if !ok {
|
||||||
task.Log.Errorf("can not find seed host uuid: %s", ps.HostUuid)
|
task.Log.Errorf("can not find seed host id: %s", ps.HostId)
|
||||||
return nil, errors.Errorf("can not find host uuid: %s", ps.HostUuid)
|
return nil, errors.Errorf("can not find host id: %s", ps.HostId)
|
||||||
}
|
}
|
||||||
|
|
||||||
// New seed peer.
|
// New seed peer.
|
||||||
|
|
|
||||||
|
|
@ -60,22 +60,13 @@ func newSeedPeerClient(dynconfig config.DynconfigInterface, hostManager HostMana
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize seed peer grpc client.
|
// Initialize seed peer grpc client.
|
||||||
netAddrs := append(seedPeersToNetAddrs(config.SeedPeers), cdnsToNetAddrs(config.CDNs)...)
|
client := client.GetClientByAddr(seedPeersToNetAddrs(config.SeedPeers), opts...)
|
||||||
client, err := client.GetClientByAddr(netAddrs, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize seed hosts.
|
// Initialize seed hosts.
|
||||||
for _, host := range seedPeersToHosts(config.SeedPeers) {
|
for _, host := range seedPeersToHosts(config.SeedPeers) {
|
||||||
hostManager.Store(host)
|
hostManager.Store(host)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize cdn hosts.
|
|
||||||
for _, host := range cdnsToHosts(config.CDNs) {
|
|
||||||
hostManager.Store(host)
|
|
||||||
}
|
|
||||||
|
|
||||||
dc := &seedPeerClient{
|
dc := &seedPeerClient{
|
||||||
hostManager: hostManager,
|
hostManager: hostManager,
|
||||||
CdnClient: client,
|
CdnClient: client,
|
||||||
|
|
@ -93,13 +84,8 @@ func (sc *seedPeerClient) OnNotify(data *config.DynconfigData) {
|
||||||
seedPeers = append(seedPeers, *seedPeer)
|
seedPeers = append(seedPeers, *seedPeer)
|
||||||
}
|
}
|
||||||
|
|
||||||
var cdns []config.CDN
|
|
||||||
for _, cdn := range data.CDNs {
|
|
||||||
cdns = append(cdns, *cdn)
|
|
||||||
}
|
|
||||||
|
|
||||||
if reflect.DeepEqual(sc.data, data) {
|
if reflect.DeepEqual(sc.data, data) {
|
||||||
logger.Infof("addresses deep equal: %#v %#v", seedPeers, cdns)
|
logger.Infof("addresses deep equal: %#v", seedPeers)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@@ -107,7 +93,17 @@ func (sc *seedPeerClient) OnNotify(data *config.DynconfigData) {
   // the seed peer needs to be cleared.
   diffSeedPeers := diffSeedPeers(sc.data.SeedPeers, data.SeedPeers)
   for _, seedPeer := range diffSeedPeers {
-    id := idgen.SeedHostID(seedPeer.Hostname, seedPeer.Port)
+    if seedPeer.IsCDN {
+      id := idgen.CDNHostID(seedPeer.Hostname, seedPeer.Port)
+      if host, ok := sc.hostManager.Load(id); ok {
+        host.LeavePeers()
+        sc.hostManager.Delete(id)
+      }
+
+      continue
+    }
+
+    id := idgen.HostID(seedPeer.Hostname, seedPeer.Port)
     if host, ok := sc.hostManager.Load(id); ok {
       host.LeavePeers()
       sc.hostManager.Delete(id)
@@ -119,29 +115,12 @@ func (sc *seedPeerClient) OnNotify(data *config.DynconfigData) {
     sc.hostManager.Store(host)
   }

-  // If only the ip of the cdn host is changed,
-  // the cdn peer needs to be cleared.
-  diffCDNs := diffCDNs(sc.data.CDNs, data.CDNs)
-  for _, cdn := range diffCDNs {
-    id := idgen.CDNHostID(cdn.Hostname, cdn.Port)
-    if host, ok := sc.hostManager.Load(id); ok {
-      host.LeavePeers()
-      sc.hostManager.Delete(id)
-    }
-  }
-
-  // Update cdn in host manager.
-  for _, host := range cdnsToHosts(data.CDNs) {
-    sc.hostManager.Store(host)
-  }
-
   // Update dynamic data.
   sc.data = data

   // Update grpc seed peer addresses.
-  netAddrs := append(seedPeersToNetAddrs(data.SeedPeers), cdnsToNetAddrs(data.CDNs)...)
-  sc.UpdateState(netAddrs)
-  logger.Infof("addresses have been updated: %#v %#v", seedPeers, cdns)
+  sc.UpdateState(seedPeersToNetAddrs(data.SeedPeers))
+  logger.Infof("addresses have been updated: %#v", seedPeers)
 }

 // seedPeersToHosts coverts []*config.SeedPeer to map[string]*Host.
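For context, the notify path in the two hunks above follows a load/evict/store pattern: any host whose address changed is asked to drop its peers, deleted from the host manager, and the fresh hosts are stored before the gRPC address list is refreshed. A hedged, self-contained sketch of that pattern follows; the Host and HostManager types here are stand-ins, not the scheduler's real resource types.

package main

import "fmt"

type Host struct {
	ID    string
	IP    string
	peers []string
}

// LeavePeers mirrors Host.LeavePeers in the diff: the host forgets its peers.
func (h *Host) LeavePeers() { h.peers = nil }

type HostManager map[string]*Host

func (m HostManager) Load(id string) (*Host, bool) { h, ok := m[id]; return h, ok }
func (m HostManager) Store(h *Host)                { m[h.ID] = h }
func (m HostManager) Delete(id string)             { delete(m, id) }

// refresh evicts the changed hosts and stores the current view.
func refresh(m HostManager, changed, current []*Host) {
	for _, h := range changed {
		if old, ok := m.Load(h.ID); ok {
			old.LeavePeers()
			m.Delete(old.ID)
		}
	}
	for _, h := range current {
		m.Store(h)
	}
}

func main() {
	m := HostManager{"seed-0": {ID: "seed-0", IP: "10.0.0.1", peers: []string{"p1"}}}
	refresh(m, []*Host{{ID: "seed-0"}}, []*Host{{ID: "seed-0", IP: "10.0.0.2"}})
	fmt.Println(m["seed-0"].IP) // 10.0.0.2
}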
@@ -153,9 +132,25 @@ func seedPeersToHosts(seedPeers []*config.SeedPeer) map[string]*Host {
       options = append(options, WithUploadLoadLimit(int32(config.LoadLimit)))
     }

+    if seedPeer.IsCDN {
+      id := idgen.CDNHostID(seedPeer.Hostname, seedPeer.Port)
+      hosts[id] = NewHost(&rpcscheduler.PeerHost{
+        Id:          id,
+        Ip:          seedPeer.IP,
+        RpcPort:     seedPeer.Port,
+        DownPort:    seedPeer.DownloadPort,
+        HostName:    seedPeer.Hostname,
+        Idc:         seedPeer.IDC,
+        Location:    seedPeer.Location,
+        NetTopology: seedPeer.NetTopology,
+      }, options...)
+
+      continue
+    }
+
-    id := idgen.SeedHostID(seedPeer.Hostname, seedPeer.Port)
+    id := idgen.HostID(seedPeer.Hostname, seedPeer.Port)
     hosts[id] = NewHost(&rpcscheduler.PeerHost{
-      Uuid:     id,
+      Id:       id,
       Ip:       seedPeer.IP,
       RpcPort:  seedPeer.Port,
       DownPort: seedPeer.DownloadPort,
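The conversion above keys each host by a generated id and only overrides the upload load limit when the cluster config supplies one. A minimal sketch of that conversion under stand-in types (the id format and the default limit value are assumptions, not the real idgen.HostID or config.DefaultClientLoadLimit):

package main

import "fmt"

type SeedPeer struct {
	Hostname     string
	IP           string
	Port         int32
	DownloadPort int32
	LoadLimit    int32 // stand-in for the cluster config lookup
}

type Host struct {
	ID              string
	IP              string
	Port            int32
	DownloadPort    int32
	UploadLoadLimit int32
}

const defaultLoadLimit = 50 // hypothetical default

// seedPeersToHosts converts config entries into hosts keyed by id.
func seedPeersToHosts(seedPeers []SeedPeer) map[string]*Host {
	hosts := map[string]*Host{}
	for _, sp := range seedPeers {
		limit := int32(defaultLoadLimit)
		if sp.LoadLimit > 0 {
			limit = sp.LoadLimit
		}
		id := fmt.Sprintf("%s-%d", sp.Hostname, sp.Port) // hypothetical id
		hosts[id] = &Host{ID: id, IP: sp.IP, Port: sp.Port, DownloadPort: sp.DownloadPort, UploadLoadLimit: limit}
	}
	return hosts
}

func main() {
	hosts := seedPeersToHosts([]SeedPeer{{Hostname: "seed-0", IP: "10.0.0.1", Port: 8002, DownloadPort: 8001}})
	fmt.Printf("%+v\n", hosts["seed-0-8002"])
}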
@@ -222,86 +217,16 @@ func diffSeedPeers(sx []*config.SeedPeer, sy []*config.SeedPeer) []*config.SeedP
   for _, x := range sx {
     found := false
     for _, y := range sy {
-      if idgen.SeedHostID(x.Hostname, x.Port) == idgen.SeedHostID(y.Hostname, y.Port) {
-        found = true
-        break
-      }
-    }
-
-    if !found {
-      diff = append(diff, x)
-    }
-  }
-
-  return diff
-}
-
-// cdnsToHosts coverts []*config.CDN to map[string]*Host.
-func cdnsToHosts(cdns []*config.CDN) map[string]*Host {
-  hosts := map[string]*Host{}
-  for _, cdn := range cdns {
-    var netTopology string
-    options := []HostOption{WithHostType(HostTypeSuperSeed), WithIsCDN(true)}
-    if config, ok := cdn.GetCDNClusterConfig(); ok && config.LoadLimit > 0 {
-      options = append(options, WithUploadLoadLimit(int32(config.LoadLimit)))
-      netTopology = config.NetTopology
-    }
-
-    id := idgen.CDNHostID(cdn.Hostname, cdn.Port)
-    hosts[id] = NewHost(&rpcscheduler.PeerHost{
-      Uuid:        id,
-      Ip:          cdn.IP,
-      RpcPort:     cdn.Port,
-      DownPort:    cdn.DownloadPort,
-      HostName:    cdn.Hostname,
-      Idc:         cdn.IDC,
-      Location:    cdn.Location,
-      NetTopology: netTopology,
-    }, options...)
-  }
-  return hosts
-}
-
-// cdnsToNetAddrs coverts []*config.CDN to []dfnet.NetAddr.
-func cdnsToNetAddrs(cdns []*config.CDN) []dfnet.NetAddr {
-  netAddrs := make([]dfnet.NetAddr, 0, len(cdns))
-  for _, cdn := range cdns {
-    netAddrs = append(netAddrs, dfnet.NetAddr{
-      Type: dfnet.TCP,
-      Addr: fmt.Sprintf("%s:%d", cdn.IP, cdn.Port),
-    })
-  }
-
-  return netAddrs
-}
-
-// diffCDNs find out different cdns.
-func diffCDNs(cx []*config.CDN, cy []*config.CDN) []*config.CDN {
-  // Get cdns with the same HostID but different IP.
-  var diff []*config.CDN
-  for _, x := range cx {
-    for _, y := range cy {
-      if x.Hostname != y.Hostname {
-        continue
-      }
-
-      if x.Port != y.Port {
-        continue
-      }
-
-      if x.IP == y.IP {
-        continue
-      }
-
-      diff = append(diff, x)
-    }
-  }
-
-  // Get the removed cdns.
-  for _, x := range cx {
-    found := false
-    for _, y := range cy {
-      if idgen.CDNHostID(x.Hostname, x.Port) == idgen.CDNHostID(y.Hostname, y.Port) {
+      if x.IsCDN {
+        if idgen.CDNHostID(x.Hostname, x.Port) == idgen.CDNHostID(y.Hostname, y.Port) {
+          found = true
+          break
+        }
+
+        continue
+      }
+
+      if idgen.HostID(x.Hostname, x.Port) == idgen.HostID(y.Hostname, y.Port) {
         found = true
         break
       }
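diffSeedPeers, as shown in the hunk above, is a one-sided set difference keyed by the generated host id: entries of the old list whose id no longer appears in the new list are returned for eviction. A self-contained sketch of that logic is below; it uses a map instead of the original nested loops (which avoids the quadratic scan), and the key function is a hypothetical stand-in for idgen.HostID / idgen.CDNHostID.

package main

import "fmt"

type SeedPeer struct {
	Hostname string
	Port     int32
}

// key is a hypothetical stand-in for the id generator.
func key(sp SeedPeer) string { return fmt.Sprintf("%s-%d", sp.Hostname, sp.Port) }

// diffSeedPeers returns entries of sx that no longer appear in sy.
func diffSeedPeers(sx, sy []SeedPeer) []SeedPeer {
	seen := map[string]bool{}
	for _, y := range sy {
		seen[key(y)] = true
	}

	var diff []SeedPeer
	for _, x := range sx {
		if !seen[key(x)] {
			diff = append(diff, x)
		}
	}
	return diff
}

func main() {
	old := []SeedPeer{{"foo", 8080}, {"bar", 8080}}
	cur := []SeedPeer{{"foo", 8080}}
	fmt.Println(diffSeedPeers(old, cur)) // [{bar 8080}]
}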
@@ -54,22 +54,6 @@ func TestSeedPeerClient_newSeedPeerClient(t *testing.T) {
         assert.NoError(err)
       },
     },
-    {
-      name: "new seed peer client with cdn",
-      mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
-        gomock.InOrder(
-          dynconfig.Get().Return(&config.DynconfigData{
-            CDNs: []*config.CDN{{ID: 1}},
-          }, nil).Times(1),
-          hostManager.Store(gomock.Any()).Return().Times(1),
-          dynconfig.Register(gomock.Any()).Return().Times(1),
-        )
-      },
-      expect: func(t *testing.T, err error) {
-        assert := assert.New(t)
-        assert.NoError(err)
-      },
-    },
     {
       name: "new seed peer client failed because of dynconfig get error data",
       mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
@@ -87,11 +71,12 @@ func TestSeedPeerClient_newSeedPeerClient(t *testing.T) {
          dynconfig.Get().Return(&config.DynconfigData{
            SeedPeers: []*config.SeedPeer{},
          }, nil).Times(1),
+          dynconfig.Register(gomock.Any()).Return().Times(1),
        )
      },
      expect: func(t *testing.T, err error) {
        assert := assert.New(t)
-        assert.EqualError(err, "address list of cdn is empty")
+        assert.NoError(err)
      },
    },
  }
@@ -125,12 +110,6 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
          IP:   "0.0.0.0",
          Port: 8080,
        }},
-        CDNs: []*config.CDN{{
-          ID:       1,
-          Hostname: "foo",
-          IP:       "0.0.0.0",
-          Port:     8080,
-        }},
      },
      mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
        gomock.InOrder(
|
||||||
IP: "0.0.0.0",
|
IP: "0.0.0.0",
|
||||||
Port: 8080,
|
Port: 8080,
|
||||||
}},
|
}},
|
||||||
CDNs: []*config.CDN{{
|
|
||||||
ID: 1,
|
|
||||||
Hostname: "foo",
|
|
||||||
IP: "0.0.0.0",
|
|
||||||
Port: 8080,
|
|
||||||
}},
|
|
||||||
}, nil).Times(1),
|
}, nil).Times(1),
|
||||||
hostManager.Store(gomock.Any()).Return().Times(2),
|
hostManager.Store(gomock.Any()).Return().Times(1),
|
||||||
dynconfig.Register(gomock.Any()).Return().Times(1),
|
dynconfig.Register(gomock.Any()).Return().Times(1),
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
|
|
@@ -161,11 +134,6 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
          Hostname: "foo",
          IP:       "0.0.0.0",
        }},
-        CDNs: []*config.CDN{{
-          ID:       1,
-          Hostname: "foo",
-          IP:       "0.0.0.0",
-        }},
      },
      mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
        mockHost := NewHost(mockRawHost)
@@ -176,19 +144,11 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
            Hostname: "foo",
            IP:       "127.0.0.1",
          }},
-          CDNs: []*config.CDN{{
-            ID:       1,
-            Hostname: "foo",
-            IP:       "127.0.0.1",
-          }},
        }, nil).Times(1),
-        hostManager.Store(gomock.Any()).Return().Times(2),
+        hostManager.Store(gomock.Any()).Return().Times(1),
        dynconfig.Register(gomock.Any()).Return().Times(1),
        hostManager.Load(gomock.Any()).Return(mockHost, true).Times(1),
-        hostManager.Delete(gomock.Eq("foo-0_Seed")).Return().Times(1),
-        hostManager.Store(gomock.Any()).Return().Times(1),
-        hostManager.Load(gomock.Any()).Return(mockHost, true).Times(1),
-        hostManager.Delete(gomock.Eq("foo-0_CDN")).Return().Times(1),
+        hostManager.Delete(gomock.Eq("foo-0")).Return().Times(1),
        hostManager.Store(gomock.Any()).Return().Times(1),
      )
    },
@@ -201,11 +161,6 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
          Hostname: "foo",
          IP:       "0.0.0.0",
        }},
-        CDNs: []*config.CDN{{
-          ID:       1,
-          Hostname: "foo",
-          IP:       "0.0.0.0",
-        }},
      },
      mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
        gomock.InOrder(
@@ -215,16 +170,9 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
            Hostname: "foo",
            IP:       "127.0.0.1",
          }},
-          CDNs: []*config.CDN{{
-            ID:       1,
-            Hostname: "foo",
-            IP:       "127.0.0.1",
-          }},
        }, nil).Times(1),
-        hostManager.Store(gomock.Any()).Return().Times(2),
-        dynconfig.Register(gomock.Any()).Return().Times(1),
-        hostManager.Load(gomock.Any()).Return(nil, false).Times(1),
        hostManager.Store(gomock.Any()).Return().Times(1),
+        dynconfig.Register(gomock.Any()).Return().Times(1),
        hostManager.Load(gomock.Any()).Return(nil, false).Times(1),
        hostManager.Store(gomock.Any()).Return().Times(1),
      )
@@ -237,10 +185,6 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
          ID: 1,
          IP: "127.0.0.1",
        }},
-        CDNs: []*config.CDN{{
-          ID: 1,
-          IP: "127.0.0.1",
-        }},
      },
      mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
        gomock.InOrder(
@@ -249,12 +193,8 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
          ID: 1,
          IP: "127.0.0.1",
        }},
-        CDNs: []*config.CDN{{
-          ID: 1,
-          IP: "127.0.0.1",
-        }},
      }, nil).Times(1),
-      hostManager.Store(gomock.Any()).Return().Times(2),
+      hostManager.Store(gomock.Any()).Return().Times(1),
      dynconfig.Register(gomock.Any()).Return().Times(1),
      )
    },
@@ -311,21 +251,20 @@ func TestSeedPeerClient_seedPeersToHosts(t *testing.T) {
      },
      expect: func(t *testing.T, hosts map[string]*Host) {
        assert := assert.New(t)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].ID, mockRawSeedHost.Uuid)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].Type, HostTypeSuperSeed)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].IP, mockRawSeedHost.Ip)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].Hostname, mockRawSeedHost.HostName)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].Port, mockRawSeedHost.RpcPort)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].DownloadPort, mockRawSeedHost.DownPort)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].IDC, mockRawSeedHost.Idc)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].NetTopology, mockRawSeedHost.NetTopology)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].Location, mockRawSeedHost.Location)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].UploadLoadLimit.Load(), int32(10))
-        assert.Empty(hosts[mockRawSeedHost.Uuid].Peers)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].IsCDN, false)
-        assert.NotEqual(hosts[mockRawSeedHost.Uuid].CreateAt.Load(), 0)
-        assert.NotEqual(hosts[mockRawSeedHost.Uuid].UpdateAt.Load(), 0)
-        assert.NotNil(hosts[mockRawSeedHost.Uuid].Log)
+        assert.Equal(hosts[mockRawSeedHost.Id].ID, mockRawSeedHost.Id)
+        assert.Equal(hosts[mockRawSeedHost.Id].Type, HostTypeSuperSeed)
+        assert.Equal(hosts[mockRawSeedHost.Id].IP, mockRawSeedHost.Ip)
+        assert.Equal(hosts[mockRawSeedHost.Id].Hostname, mockRawSeedHost.HostName)
+        assert.Equal(hosts[mockRawSeedHost.Id].Port, mockRawSeedHost.RpcPort)
+        assert.Equal(hosts[mockRawSeedHost.Id].DownloadPort, mockRawSeedHost.DownPort)
+        assert.Equal(hosts[mockRawSeedHost.Id].IDC, mockRawSeedHost.Idc)
+        assert.Equal(hosts[mockRawSeedHost.Id].NetTopology, mockRawSeedHost.NetTopology)
+        assert.Equal(hosts[mockRawSeedHost.Id].Location, mockRawSeedHost.Location)
+        assert.Equal(hosts[mockRawSeedHost.Id].UploadLoadLimit.Load(), int32(10))
+        assert.Empty(hosts[mockRawSeedHost.Id].Peers)
+        assert.NotEqual(hosts[mockRawSeedHost.Id].CreateAt.Load(), 0)
+        assert.NotEqual(hosts[mockRawSeedHost.Id].UpdateAt.Load(), 0)
+        assert.NotNil(hosts[mockRawSeedHost.Id].Log)
      },
    },
    {
@@ -345,21 +284,20 @@ func TestSeedPeerClient_seedPeersToHosts(t *testing.T) {
      },
      expect: func(t *testing.T, hosts map[string]*Host) {
        assert := assert.New(t)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].ID, mockRawSeedHost.Uuid)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].Type, HostTypeSuperSeed)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].IP, mockRawSeedHost.Ip)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].Hostname, mockRawSeedHost.HostName)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].Port, mockRawSeedHost.RpcPort)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].DownloadPort, mockRawSeedHost.DownPort)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].IDC, mockRawSeedHost.Idc)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].NetTopology, mockRawSeedHost.NetTopology)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].Location, mockRawSeedHost.Location)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
-        assert.Empty(hosts[mockRawSeedHost.Uuid].Peers)
-        assert.Equal(hosts[mockRawSeedHost.Uuid].IsCDN, false)
-        assert.NotEqual(hosts[mockRawSeedHost.Uuid].CreateAt.Load(), 0)
-        assert.NotEqual(hosts[mockRawSeedHost.Uuid].UpdateAt.Load(), 0)
-        assert.NotNil(hosts[mockRawSeedHost.Uuid].Log)
+        assert.Equal(hosts[mockRawSeedHost.Id].ID, mockRawSeedHost.Id)
+        assert.Equal(hosts[mockRawSeedHost.Id].Type, HostTypeSuperSeed)
+        assert.Equal(hosts[mockRawSeedHost.Id].IP, mockRawSeedHost.Ip)
+        assert.Equal(hosts[mockRawSeedHost.Id].Hostname, mockRawSeedHost.HostName)
+        assert.Equal(hosts[mockRawSeedHost.Id].Port, mockRawSeedHost.RpcPort)
+        assert.Equal(hosts[mockRawSeedHost.Id].DownloadPort, mockRawSeedHost.DownPort)
+        assert.Equal(hosts[mockRawSeedHost.Id].IDC, mockRawSeedHost.Idc)
+        assert.Equal(hosts[mockRawSeedHost.Id].NetTopology, mockRawSeedHost.NetTopology)
+        assert.Equal(hosts[mockRawSeedHost.Id].Location, mockRawSeedHost.Location)
+        assert.Equal(hosts[mockRawSeedHost.Id].UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
+        assert.Empty(hosts[mockRawSeedHost.Id].Peers)
+        assert.NotEqual(hosts[mockRawSeedHost.Id].CreateAt.Load(), 0)
+        assert.NotEqual(hosts[mockRawSeedHost.Id].UpdateAt.Load(), 0)
+        assert.NotNil(hosts[mockRawSeedHost.Id].Log)
      },
    },
    {
@@ -623,344 +561,3 @@ func TestSeedPeerClient_diffSeedPeers(t *testing.T) {
    })
  }
}
-
-func TestSeedPeerClient_cdnsToHosts(t *testing.T) {
-  mockCDNClusterConfig, err := json.Marshal(&types.CDNClusterConfig{
-    LoadLimit:   10,
-    NetTopology: "foo",
-  })
-  if err != nil {
-    t.Fatal(err)
-  }
-
-  tests := []struct {
-    name   string
-    cdns   []*config.CDN
-    expect func(t *testing.T, hosts map[string]*Host)
-  }{
-    {
-      name: "cdns covert to hosts",
-      cdns: []*config.CDN{
-        {
-          ID:           1,
-          Hostname:     mockRawCDNHost.HostName,
-          IP:           mockRawCDNHost.Ip,
-          Port:         mockRawCDNHost.RpcPort,
-          DownloadPort: mockRawCDNHost.DownPort,
-          Location:     mockRawCDNHost.Location,
-          IDC:          mockRawCDNHost.Idc,
-          CDNCluster: &config.CDNCluster{
-            Config: mockCDNClusterConfig,
-          },
-        },
-      },
-      expect: func(t *testing.T, hosts map[string]*Host) {
-        assert := assert.New(t)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].ID, mockRawCDNHost.Uuid)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].Type, HostTypeSuperSeed)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].IP, mockRawCDNHost.Ip)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].Hostname, mockRawCDNHost.HostName)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].Port, mockRawCDNHost.RpcPort)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].DownloadPort, mockRawCDNHost.DownPort)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].IDC, mockRawCDNHost.Idc)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].NetTopology, "foo")
-        assert.Equal(hosts[mockRawCDNHost.Uuid].Location, mockRawCDNHost.Location)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].UploadLoadLimit.Load(), int32(10))
-        assert.Empty(hosts[mockRawCDNHost.Uuid].Peers)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].IsCDN, true)
-        assert.NotEqual(hosts[mockRawCDNHost.Uuid].CreateAt.Load(), 0)
-        assert.NotEqual(hosts[mockRawCDNHost.Uuid].UpdateAt.Load(), 0)
-        assert.NotNil(hosts[mockRawCDNHost.Uuid].Log)
-      },
-    },
-    {
-      name: "cdns covert to hosts without cluster config",
-      cdns: []*config.CDN{
-        {
-          ID:           1,
-          Hostname:     mockRawCDNHost.HostName,
-          IP:           mockRawCDNHost.Ip,
-          Port:         mockRawCDNHost.RpcPort,
-          DownloadPort: mockRawCDNHost.DownPort,
-          Location:     mockRawCDNHost.Location,
-          IDC:          mockRawCDNHost.Idc,
-        },
-      },
-      expect: func(t *testing.T, hosts map[string]*Host) {
-        assert := assert.New(t)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].ID, mockRawCDNHost.Uuid)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].Type, HostTypeSuperSeed)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].IP, mockRawCDNHost.Ip)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].Hostname, mockRawCDNHost.HostName)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].Port, mockRawCDNHost.RpcPort)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].DownloadPort, mockRawCDNHost.DownPort)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].IDC, mockRawCDNHost.Idc)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].NetTopology, "")
-        assert.Equal(hosts[mockRawCDNHost.Uuid].Location, mockRawCDNHost.Location)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
-        assert.Empty(hosts[mockRawCDNHost.Uuid].Peers)
-        assert.Equal(hosts[mockRawCDNHost.Uuid].IsCDN, true)
-        assert.NotEqual(hosts[mockRawCDNHost.Uuid].CreateAt.Load(), 0)
-        assert.NotEqual(hosts[mockRawCDNHost.Uuid].UpdateAt.Load(), 0)
-        assert.NotNil(hosts[mockRawCDNHost.Uuid].Log)
-      },
-    },
-    {
-      name: "cdns is empty",
-      cdns: []*config.CDN{},
-      expect: func(t *testing.T, hosts map[string]*Host) {
-        assert := assert.New(t)
-        assert.Equal(len(hosts), 0)
-      },
-    },
-  }
-
-  for _, tc := range tests {
-    t.Run(tc.name, func(t *testing.T) {
-      tc.expect(t, cdnsToHosts(tc.cdns))
-    })
-  }
-}
-
-func TestSeedPeerClient_cdnsToNetAddrs(t *testing.T) {
-  tests := []struct {
-    name   string
-    cdns   []*config.CDN
-    expect func(t *testing.T, netAddrs []dfnet.NetAddr)
-  }{
-    {
-      name: "cdns covert to netAddr",
-      cdns: []*config.CDN{
-        {
-          ID:           1,
-          Hostname:     mockRawCDNHost.HostName,
-          IP:           mockRawCDNHost.Ip,
-          Port:         mockRawCDNHost.RpcPort,
-          DownloadPort: mockRawCDNHost.DownPort,
-          Location:     mockRawCDNHost.Location,
-          IDC:          mockRawCDNHost.Idc,
-        },
-      },
-      expect: func(t *testing.T, netAddrs []dfnet.NetAddr) {
-        assert := assert.New(t)
-        assert.Equal(netAddrs[0].Type, dfnet.TCP)
-        assert.Equal(netAddrs[0].Addr, fmt.Sprintf("%s:%d", mockRawCDNHost.Ip, mockRawCDNHost.RpcPort))
-      },
-    },
-    {
-      name: "cdns is empty",
-      cdns: []*config.CDN{},
-      expect: func(t *testing.T, netAddrs []dfnet.NetAddr) {
-        assert := assert.New(t)
-        assert.Equal(len(netAddrs), 0)
-      },
-    },
-  }
-
-  for _, tc := range tests {
-    t.Run(tc.name, func(t *testing.T) {
-      tc.expect(t, cdnsToNetAddrs(tc.cdns))
-    })
-  }
-}
-
-func TestSeedPeerClient_diffCDNs(t *testing.T) {
-  tests := []struct {
-    name   string
-    cx     []*config.CDN
-    cy     []*config.CDN
-    expect func(t *testing.T, diff []*config.CDN)
-  }{
-    {
-      name: "same cdn list",
-      cx: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      cy: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      expect: func(t *testing.T, diff []*config.CDN) {
-        assert := assert.New(t)
-        assert.EqualValues(diff, []*config.CDN(nil))
-      },
-    },
-    {
-      name: "different hostname",
-      cx: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "bar",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      cy: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      expect: func(t *testing.T, diff []*config.CDN) {
-        assert := assert.New(t)
-        assert.EqualValues(diff, []*config.CDN{
-          {
-            ID:       1,
-            Hostname: "bar",
-            IP:       "127.0.0.1",
-            Port:     8080,
-          },
-        })
-      },
-    },
-    {
-      name: "different port",
-      cx: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "127.0.0.1",
-          Port:     8081,
-        },
-      },
-      cy: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      expect: func(t *testing.T, diff []*config.CDN) {
-        assert := assert.New(t)
-        assert.EqualValues(diff, []*config.CDN{
-          {
-            ID:       1,
-            Hostname: "foo",
-            IP:       "127.0.0.1",
-            Port:     8081,
-          },
-        })
-      },
-    },
-    {
-      name: "different ip",
-      cx: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "0.0.0.0",
-          Port:     8080,
-        },
-      },
-      cy: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      expect: func(t *testing.T, diff []*config.CDN) {
-        assert := assert.New(t)
-        assert.EqualValues(diff, []*config.CDN{
-          {
-            ID:       1,
-            Hostname: "foo",
-            IP:       "0.0.0.0",
-            Port:     8080,
-          },
-        })
-      },
-    },
-    {
-      name: "remove y cdn",
-      cx: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-        {
-          ID:       2,
-          Hostname: "bar",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      cy: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      expect: func(t *testing.T, diff []*config.CDN) {
-        assert := assert.New(t)
-        assert.EqualValues(diff, []*config.CDN{
-          {
-            ID:       2,
-            Hostname: "bar",
-            IP:       "127.0.0.1",
-            Port:     8080,
-          },
-        })
-      },
-    },
-    {
-      name: "remove x cdn",
-      cx: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "foo",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      cy: []*config.CDN{
-        {
-          ID:       1,
-          Hostname: "baz",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-        {
-          ID:       2,
-          Hostname: "bar",
-          IP:       "127.0.0.1",
-          Port:     8080,
-        },
-      },
-      expect: func(t *testing.T, diff []*config.CDN) {
-        assert := assert.New(t)
-        assert.EqualValues(diff, []*config.CDN{
-          {
-            ID:       1,
-            Hostname: "foo",
-            IP:       "127.0.0.1",
-            Port:     8080,
-          },
-        })
-      },
-    },
-  }
-
-  for _, tc := range tests {
-    t.Run(tc.name, func(t *testing.T) {
-      tc.expect(t, diffCDNs(tc.cx, tc.cy))
-    })
-  }
-}
@@ -30,7 +30,7 @@ import (
 var (
   mockRawHost = &scheduler.PeerHost{
-    Uuid:     idgen.HostID("hostname", 8003),
+    Id:       idgen.HostID("hostname", 8003),
     Ip:       "127.0.0.1",
     RpcPort:  8003,
     DownPort: 8001,
@@ -49,7 +49,7 @@ var (
     Algorithm: evaluator.DefaultAlgorithm,
   }
   mockRawHost = &rpcscheduler.PeerHost{
-    Uuid:     idgen.HostID("hostname", 8003),
+    Id:       idgen.HostID("hostname", 8003),
     Ip:       "127.0.0.1",
     RpcPort:  8003,
     DownPort: 8001,
@@ -61,7 +61,7 @@ var (
   }

   mockRawSeedHost = &rpcscheduler.PeerHost{
-    Uuid:     idgen.SeedHostID("hostname", 8003),
+    Id:       idgen.HostID("hostname_seed", 8003),
     Ip:       "127.0.0.1",
     RpcPort:  8003,
     DownPort: 8001,
@@ -532,7 +532,7 @@ func (s *Service) registerTask(ctx context.Context, req *rpcscheduler.PeerTaskRe

 // registerHost creates a new host or reuses a previous host.
 func (s *Service) registerHost(ctx context.Context, rawHost *rpcscheduler.PeerHost) *resource.Host {
-  host, ok := s.resource.HostManager().Load(rawHost.Uuid)
+  host, ok := s.resource.HostManager().Load(rawHost.Id)
   if !ok {
     // Get scheduler cluster client config by manager.
     var options []resource.HostOption
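registerHost, as the hunk above and the registerHost tests further down show, is a load-or-create path keyed by the raw host Id: reuse the stored host if present, otherwise create one, taking the upload load limit from the scheduler cluster client config when the manager provides it. A hedged, self-contained sketch of that flow with stand-in types (the default limit value is an assumption):

package main

import "fmt"

type Host struct {
	ID              string
	UploadLoadLimit int32
}

type HostManager map[string]*Host

func (m HostManager) Load(id string) (*Host, bool) { h, ok := m[id]; return h, ok }
func (m HostManager) Store(h *Host)                { m[h.ID] = h }

// registerHost reuses an existing host or creates and stores a new one.
func registerHost(m HostManager, id string, clusterLoadLimit int32, haveClusterConfig bool) *Host {
	if h, ok := m.Load(id); ok {
		return h
	}

	h := &Host{ID: id, UploadLoadLimit: 25} // hypothetical default limit
	if haveClusterConfig && clusterLoadLimit > 0 {
		h.UploadLoadLimit = clusterLoadLimit
	}
	m.Store(h)
	return h
}

func main() {
	m := HostManager{}
	fmt.Println(registerHost(m, "hostname-8003", 10, true).UploadLoadLimit) // 10 (created with cluster config)
	fmt.Println(registerHost(m, "hostname-8003", 0, false).UploadLoadLimit) // 10 (reused, not recreated)
}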
@@ -844,7 +844,6 @@ func (s *Service) createRecord(peer *resource.Peer, peerState int, req *rpcsched
     record.ParentLocation = parent.Host.Location
     record.ParentFreeUploadLoad = parent.Host.FreeUploadLoad()
     record.ParentHostType = int(parent.Host.Type)
-    record.ParentIsCDN = parent.Host.IsCDN
     record.ParentCreateAt = parent.CreateAt.Load().UnixNano()
     record.ParentUpdateAt = parent.UpdateAt.Load().UnixNano()
   }
@@ -58,7 +58,7 @@ var (
   }

   mockRawHost = &rpcscheduler.PeerHost{
-    Uuid:     idgen.HostID("hostname", 8003),
+    Id:       idgen.HostID("hostname", 8003),
     Ip:       "127.0.0.1",
     RpcPort:  8003,
     DownPort: 8001,
@@ -70,7 +70,7 @@ var (
   }

   mockRawSeedHost = &rpcscheduler.PeerHost{
-    Uuid:     idgen.SeedHostID("hostname", 8003),
+    Id:       idgen.HostID("hostname_seed", 8003),
     Ip:       "127.0.0.1",
     RpcPort:  8003,
     DownPort: 8001,
@@ -166,7 +166,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -198,7 +198,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -231,7 +231,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -265,7 +265,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -303,7 +303,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -339,7 +339,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -375,7 +375,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -411,7 +411,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -453,7 +453,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -494,7 +494,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -535,7 +535,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -576,7 +576,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -611,7 +611,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
      req: &rpcscheduler.PeerTaskRequest{
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
      },
      mock: func(
@@ -1232,7 +1232,7 @@ func TestService_AnnounceTask(t *testing.T) {
        Cid:     mockCID,
        UrlMeta: &base.UrlMeta{},
        PeerHost: &rpcscheduler.PeerHost{
-          Uuid: mockRawHost.Uuid,
+          Id: mockRawHost.Id,
        },
        PiecePacket: &base.PiecePacket{
          PieceInfos: []*base.PieceInfo{{PieceNum: 1}},
@@ -2028,12 +2028,12 @@ func TestService_registerHost(t *testing.T) {
      mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
        gomock.InOrder(
          mr.HostManager().Return(hostManager).Times(1),
-          mh.Load(gomock.Eq(mockRawHost.Uuid)).Return(mockHost, true).Times(1),
+          mh.Load(gomock.Eq(mockRawHost.Id)).Return(mockHost, true).Times(1),
        )
      },
      expect: func(t *testing.T, host *resource.Host) {
        assert := assert.New(t)
-        assert.Equal(host.ID, mockRawHost.Uuid)
+        assert.Equal(host.ID, mockRawHost.Id)
      },
    },
    {
@@ -2046,7 +2046,7 @@ func TestService_registerHost(t *testing.T) {
      mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
        gomock.InOrder(
          mr.HostManager().Return(hostManager).Times(1),
-          mh.Load(gomock.Eq(mockRawHost.Uuid)).Return(nil, false).Times(1),
+          mh.Load(gomock.Eq(mockRawHost.Id)).Return(nil, false).Times(1),
          md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{LoadLimit: 10}, true).Times(1),
          mr.HostManager().Return(hostManager).Times(1),
          mh.Store(gomock.Any()).Return().Times(1),
@@ -2054,7 +2054,7 @@ func TestService_registerHost(t *testing.T) {
      },
      expect: func(t *testing.T, host *resource.Host) {
        assert := assert.New(t)
-        assert.Equal(host.ID, mockRawHost.Uuid)
+        assert.Equal(host.ID, mockRawHost.Id)
        assert.Equal(host.UploadLoadLimit.Load(), int32(10))
      },
    },
@@ -2068,7 +2068,7 @@ func TestService_registerHost(t *testing.T) {
      mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
        gomock.InOrder(
          mr.HostManager().Return(hostManager).Times(1),
-          mh.Load(gomock.Eq(mockRawHost.Uuid)).Return(nil, false).Times(1),
+          mh.Load(gomock.Eq(mockRawHost.Id)).Return(nil, false).Times(1),
          md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{}, false).Times(1),
          mr.HostManager().Return(hostManager).Times(1),
          mh.Store(gomock.Any()).Return().Times(1),
@@ -2076,7 +2076,7 @@ func TestService_registerHost(t *testing.T) {
      },
      expect: func(t *testing.T, host *resource.Host) {
        assert := assert.New(t)
-        assert.Equal(host.ID, mockRawHost.Uuid)
+        assert.Equal(host.ID, mockRawHost.Id)
      },
    },
  }
@@ -167,9 +167,6 @@ type Record struct {
   // ParentHostType is parent host type.
   ParentHostType int `csv:"parentHostType"`

-  // ParentIsCDN is used as tag cdn.
-  ParentIsCDN bool `csv:"parentIsCDN"`
-
   // ParentCreateAt is parent peer create nanosecond time.
   ParentCreateAt int64 `csv:"parentCreateAt"`

@@ -266,7 +266,6 @@ func TestStorage_List(t *testing.T) {
        ParentNetTopology:    "parent_net_topology",
        ParentLocation:       "parent_location",
        ParentFreeUploadLoad: 1,
-        ParentIsCDN:          true,
        ParentCreateAt:       time.Now().UnixNano(),
        ParentUpdateAt:       time.Now().UnixNano(),
      },
@@ -30,7 +30,7 @@ const (
 const (
   managerServerName   = "manager"
   schedulerServerName = "scheduler"
-  cdnServerName       = "cdn"
+  seedPeerServerName  = "seed-peer"
   dfdaemonServerName  = "dfdaemon"
   proxyServerName     = "proxy"
 )
@@ -56,10 +56,10 @@ var servers = map[string]server{
     logDirName: schedulerServerName,
     replicas:   3,
   },
-  cdnServerName: {
-    name:       cdnServerName,
+  seedPeerServerName: {
+    name:       seedPeerServerName,
     namespace:  dragonflyNamespace,
-    logDirName: cdnServerName,
+    logDirName: "daemon",
     replicas:   3,
   },
   dfdaemonServerName: {
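For reference, the e2e hunk above swaps the cdn entry of the servers registry for a seed-peer entry whose logs land under the daemon directory. A small, self-contained sketch of that registry (only the fields visible in the diff; the real struct may carry more):

package main

import "fmt"

type server struct {
	name       string
	namespace  string
	logDirName string
	replicas   int
}

const (
	dragonflyNamespace = "dragonfly-system"
	seedPeerServerName = "seed-peer"
)

var servers = map[string]server{
	seedPeerServerName: {
		name:       seedPeerServerName,
		namespace:  dragonflyNamespace,
		logDirName: "daemon", // seed peer runs as a daemon, so its logs live under "daemon"
		replicas:   3,
	},
}

func main() {
	fmt.Printf("%+v\n", servers[seedPeerServerName])
}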
@@ -17,7 +17,7 @@
 package manager

 const (
-  cdnCachePath = "/tmp/cdn/download"
+  seedPeerDataPath = "/var/lib/dragonfly"

   managerService = "dragonfly-manager.dragonfly-system.svc"
   managerPort    = "8080"
@@ -26,7 +26,4 @@ const (

   dragonflyNamespace = "dragonfly-system"
   e2eNamespace       = "dragonfly-e2e"
-
-  proxy            = "localhost:65001"
-  hostnameFilePath = "/etc/hostname"
 )
Some files were not shown because too many files have changed in this diff.