feat: add seed peer logic (#1302)
* feat: announce seed peer
* feat: remove cdn logic
* feat: remove cdn job
* feat: dfdaemon change host uuid to host id
* feat: go generate mocks
* feat: remove cdn compatibility
* feat: change docker compose
* fix: reuse panic
* feat: compatible with v2.0.3-beta.2

Signed-off-by: Gaius <gaius.qi@gmail.com>
parent 26cd8f0e22
commit faa5e4e465
@@ -15,7 +15,7 @@ env:
   KIND_VERSION: v0.11.1
   CONTAINERD_VERSION: v1.5.2
   KIND_CONFIG_PATH: test/testdata/kind/config.yaml
-  DRAGONFLY_STABLE_IMAGE_TAG: v2.0.2
+  DRAGONFLY_STABLE_IMAGE_TAG: v2.0.3-beta.2
   DRAGONFLY_CHARTS_PATH: deploy/helm-charts/charts/dragonfly
   DRAGONFLY_CHARTS_CONFIG_PATH: test/testdata/charts/config.yaml
   DRAGONFLY_FILE_SERVER_PATH: test/testdata/k8s/file-server.yaml
@@ -28,7 +28,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        module: ["manager", "scheduler", "cdn", "dfdaemon"]
+        module: ["manager", "scheduler", "dfdaemon"]
     steps:
       - name: Checkout code
         uses: actions/checkout@v2
@@ -4,9 +4,12 @@ run:
 
 linters-settings:
   gocyclo:
-    min-complexity: 42
+    min-complexity: 44
   gci:
-    local-prefixes: d7y.io/dragonfly/v2
+    sections:
+      - standard
+      - default
+      - prefix(d7y.io/dragonfly/v2)
 
 issues:
   new: true
1017	api/manager/docs.go
File diff suppressed because it is too large
@ -4,12 +4,12 @@ definitions:
|
|||
properties:
|
||||
bio:
|
||||
type: string
|
||||
cdn_clusters:
|
||||
items:
|
||||
$ref: '#/definitions/model.CDNCluster'
|
||||
type: array
|
||||
created_at:
|
||||
type: string
|
||||
download_rate_limit:
|
||||
type: integer
|
||||
id:
|
||||
type: integer
|
||||
name:
|
||||
type: string
|
||||
scheduler_clusters:
|
||||
|
|
@ -22,6 +22,8 @@ definitions:
|
|||
type: array
|
||||
state:
|
||||
type: string
|
||||
updated_at:
|
||||
type: string
|
||||
url:
|
||||
type: string
|
||||
user:
|
||||
|
|
@ -29,80 +31,18 @@ definitions:
|
|||
user_id:
|
||||
type: integer
|
||||
type: object
|
||||
model.Assertion:
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
policy:
|
||||
items:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: array
|
||||
policyMap:
|
||||
additionalProperties:
|
||||
type: integer
|
||||
type: object
|
||||
rm: {}
|
||||
tokens:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
value:
|
||||
type: string
|
||||
type: object
|
||||
model.AssertionMap:
|
||||
additionalProperties:
|
||||
$ref: '#/definitions/model.Assertion'
|
||||
type: object
|
||||
model.CDN:
|
||||
properties:
|
||||
cdnclusterID:
|
||||
type: integer
|
||||
download_port:
|
||||
type: integer
|
||||
host_name:
|
||||
type: string
|
||||
idc:
|
||||
type: string
|
||||
ip:
|
||||
type: string
|
||||
location:
|
||||
type: string
|
||||
port:
|
||||
type: integer
|
||||
state:
|
||||
type: string
|
||||
type: object
|
||||
model.CDNCluster:
|
||||
properties:
|
||||
application_id:
|
||||
type: integer
|
||||
bio:
|
||||
type: string
|
||||
config:
|
||||
$ref: '#/definitions/model.JSONMap'
|
||||
is_default:
|
||||
type: boolean
|
||||
jobs:
|
||||
items:
|
||||
$ref: '#/definitions/model.Job'
|
||||
type: array
|
||||
name:
|
||||
type: string
|
||||
scheduler_clusters:
|
||||
items:
|
||||
$ref: '#/definitions/model.SchedulerCluster'
|
||||
type: array
|
||||
security_group_id:
|
||||
type: integer
|
||||
type: object
|
||||
model.Config:
|
||||
properties:
|
||||
bio:
|
||||
type: string
|
||||
created_at:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
name:
|
||||
type: string
|
||||
updated_at:
|
||||
type: string
|
||||
user_id:
|
||||
type: integer
|
||||
value:
|
||||
|
|
@ -117,10 +57,10 @@ definitions:
|
|||
$ref: '#/definitions/model.JSONMap'
|
||||
bio:
|
||||
type: string
|
||||
cdn_clusters:
|
||||
items:
|
||||
$ref: '#/definitions/model.CDNCluster'
|
||||
type: array
|
||||
created_at:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
result:
|
||||
$ref: '#/definitions/model.JSONMap'
|
||||
scheduler_clusters:
|
||||
|
|
@ -137,6 +77,8 @@ definitions:
|
|||
type: string
|
||||
type:
|
||||
type: string
|
||||
updated_at:
|
||||
type: string
|
||||
user_id:
|
||||
type: integer
|
||||
type: object
|
||||
|
|
@ -148,15 +90,25 @@ definitions:
|
|||
type: string
|
||||
client_secret:
|
||||
type: string
|
||||
created_at:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
name:
|
||||
type: string
|
||||
redirect_url:
|
||||
type: string
|
||||
updated_at:
|
||||
type: string
|
||||
type: object
|
||||
model.Scheduler:
|
||||
properties:
|
||||
created_at:
|
||||
type: string
|
||||
host_name:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
idc:
|
||||
type: string
|
||||
ip:
|
||||
|
|
@ -171,6 +123,8 @@ definitions:
|
|||
type: integer
|
||||
state:
|
||||
type: string
|
||||
updated_at:
|
||||
type: string
|
||||
type: object
|
||||
model.SchedulerCluster:
|
||||
properties:
|
||||
|
|
@ -178,14 +132,14 @@ definitions:
|
|||
type: integer
|
||||
bio:
|
||||
type: string
|
||||
cdn_clusters:
|
||||
items:
|
||||
$ref: '#/definitions/model.CDNCluster'
|
||||
type: array
|
||||
client_config:
|
||||
$ref: '#/definitions/model.JSONMap'
|
||||
config:
|
||||
$ref: '#/definitions/model.JSONMap'
|
||||
created_at:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
is_default:
|
||||
type: boolean
|
||||
jobs:
|
||||
|
|
@ -202,24 +156,36 @@ definitions:
|
|||
items:
|
||||
$ref: '#/definitions/model.SeedPeerCluster'
|
||||
type: array
|
||||
updated_at:
|
||||
type: string
|
||||
type: object
|
||||
model.SecurityGroup:
|
||||
properties:
|
||||
bio:
|
||||
type: string
|
||||
created_at:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
name:
|
||||
type: string
|
||||
security_rules:
|
||||
items:
|
||||
$ref: '#/definitions/model.SecurityRule'
|
||||
type: array
|
||||
updated_at:
|
||||
type: string
|
||||
type: object
|
||||
model.SecurityRule:
|
||||
properties:
|
||||
bio:
|
||||
type: string
|
||||
created_at:
|
||||
type: string
|
||||
domain:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
name:
|
||||
type: string
|
||||
proxy_domain:
|
||||
|
|
@ -228,17 +194,25 @@ definitions:
|
|||
items:
|
||||
$ref: '#/definitions/model.SecurityGroup'
|
||||
type: array
|
||||
updated_at:
|
||||
type: string
|
||||
type: object
|
||||
model.SeedPeer:
|
||||
properties:
|
||||
created_at:
|
||||
type: string
|
||||
download_port:
|
||||
type: integer
|
||||
host_name:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
idc:
|
||||
type: string
|
||||
ip:
|
||||
type: string
|
||||
is_cdn:
|
||||
type: boolean
|
||||
location:
|
||||
type: string
|
||||
net_topology:
|
||||
|
|
@ -251,6 +225,8 @@ definitions:
|
|||
type: string
|
||||
type:
|
||||
type: string
|
||||
updated_at:
|
||||
type: string
|
||||
type: object
|
||||
model.SeedPeerCluster:
|
||||
properties:
|
||||
|
|
@ -260,6 +236,10 @@ definitions:
|
|||
type: string
|
||||
config:
|
||||
$ref: '#/definitions/model.JSONMap'
|
||||
created_at:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
is_default:
|
||||
type: boolean
|
||||
jobs:
|
||||
|
|
@ -276,6 +256,8 @@ definitions:
|
|||
$ref: '#/definitions/model.JSONMap'
|
||||
security_group_id:
|
||||
type: integer
|
||||
updated_at:
|
||||
type: string
|
||||
type: object
|
||||
model.User:
|
||||
properties:
|
||||
|
|
@ -283,8 +265,12 @@ definitions:
|
|||
type: string
|
||||
bio:
|
||||
type: string
|
||||
created_at:
|
||||
type: string
|
||||
email:
|
||||
type: string
|
||||
id:
|
||||
type: integer
|
||||
location:
|
||||
type: string
|
||||
name:
|
||||
|
|
@ -293,6 +279,8 @@ definitions:
|
|||
type: string
|
||||
state:
|
||||
type: string
|
||||
updated_at:
|
||||
type: string
|
||||
type: object
|
||||
rbac.Permission:
|
||||
properties:
|
||||
|
|
@ -320,15 +308,6 @@ definitions:
|
|||
- action
|
||||
- object
|
||||
type: object
|
||||
types.CDNClusterConfig:
|
||||
properties:
|
||||
load_limit:
|
||||
maximum: 5000
|
||||
minimum: 1
|
||||
type: integer
|
||||
net_topology:
|
||||
type: string
|
||||
type: object
|
||||
types.CreateApplicationRequest:
|
||||
properties:
|
||||
bio:
|
||||
|
|
@ -350,43 +329,6 @@ definitions:
|
|||
- name
|
||||
- user_id
|
||||
type: object
|
||||
types.CreateCDNClusterRequest:
|
||||
properties:
|
||||
bio:
|
||||
type: string
|
||||
config:
|
||||
$ref: '#/definitions/types.CDNClusterConfig'
|
||||
is_default:
|
||||
type: boolean
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- config
|
||||
- name
|
||||
type: object
|
||||
types.CreateCDNRequest:
|
||||
properties:
|
||||
cdn_cluster_id:
|
||||
type: integer
|
||||
download_port:
|
||||
type: integer
|
||||
host_name:
|
||||
type: string
|
||||
idc:
|
||||
type: string
|
||||
ip:
|
||||
type: string
|
||||
location:
|
||||
type: string
|
||||
port:
|
||||
type: integer
|
||||
required:
|
||||
- cdn_cluster_id
|
||||
- download_port
|
||||
- host_name
|
||||
- ip
|
||||
- port
|
||||
type: object
|
||||
types.CreateConfigRequest:
|
||||
properties:
|
||||
bio:
|
||||
|
|
@ -409,10 +351,6 @@ definitions:
|
|||
type: object
|
||||
bio:
|
||||
type: string
|
||||
cdn_cluster_ids:
|
||||
items:
|
||||
type: integer
|
||||
type: array
|
||||
result:
|
||||
additionalProperties: true
|
||||
type: object
|
||||
|
|
@ -467,8 +405,6 @@ definitions:
|
|||
properties:
|
||||
bio:
|
||||
type: string
|
||||
cdn_cluster_id:
|
||||
type: integer
|
||||
client_config:
|
||||
$ref: '#/definitions/types.SchedulerClusterClientConfig'
|
||||
config:
|
||||
|
|
@ -566,6 +502,10 @@ definitions:
|
|||
seed_peer_cluster_id:
|
||||
type: integer
|
||||
type:
|
||||
enum:
|
||||
- super
|
||||
- strong
|
||||
- weak
|
||||
type: string
|
||||
required:
|
||||
- download_port
|
||||
|
|
@ -725,32 +665,6 @@ definitions:
|
|||
required:
|
||||
- user_id
|
||||
type: object
|
||||
types.UpdateCDNClusterRequest:
|
||||
properties:
|
||||
bio:
|
||||
type: string
|
||||
config:
|
||||
$ref: '#/definitions/types.CDNClusterConfig'
|
||||
is_default:
|
||||
type: boolean
|
||||
name:
|
||||
type: string
|
||||
type: object
|
||||
types.UpdateCDNRequest:
|
||||
properties:
|
||||
cdn_cluster_id:
|
||||
type: integer
|
||||
download_port:
|
||||
type: integer
|
||||
idc:
|
||||
type: string
|
||||
ip:
|
||||
type: string
|
||||
location:
|
||||
type: string
|
||||
port:
|
||||
type: integer
|
||||
type: object
|
||||
types.UpdateConfigRequest:
|
||||
properties:
|
||||
bio:
|
||||
|
|
@ -789,8 +703,6 @@ definitions:
|
|||
properties:
|
||||
bio:
|
||||
type: string
|
||||
cdn_cluster_id:
|
||||
type: integer
|
||||
client_config:
|
||||
$ref: '#/definitions/types.SchedulerClusterClientConfig'
|
||||
config:
|
||||
|
|
@ -869,6 +781,10 @@ definitions:
|
|||
seed_peer_cluster_id:
|
||||
type: integer
|
||||
type:
|
||||
enum:
|
||||
- super
|
||||
- strong
|
||||
- weak
|
||||
type: string
|
||||
type: object
|
||||
types.UpdateUserRequest:
|
||||
|
|
@ -1042,65 +958,6 @@ paths:
|
|||
summary: Update Application
|
||||
tags:
|
||||
- Application
|
||||
/applications/{id}/cdn-clusters/{cdn_cluster_id}:
|
||||
delete:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Delete CDN to Application
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: cdn cluster id
|
||||
in: path
|
||||
name: cdn_cluster_id
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Delete CDN to Application
|
||||
tags:
|
||||
- Application
|
||||
put:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Add CDN to Application
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: cdn cluster id
|
||||
in: path
|
||||
name: cdn_cluster_id
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Add CDN to Application
|
||||
tags:
|
||||
- Application
|
||||
/applications/{id}/scheduler-clusters/{scheduler_cluster_id}:
|
||||
delete:
|
||||
consumes:
|
||||
|
|
@ -1219,362 +1076,6 @@ paths:
|
|||
summary: Add SeedPeer to Application
|
||||
tags:
|
||||
- Application
|
||||
/cdn-clusters:
|
||||
get:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Get CDNClusters
|
||||
parameters:
|
||||
- default: 0
|
||||
description: current page
|
||||
in: query
|
||||
name: page
|
||||
required: true
|
||||
type: integer
|
||||
- default: 10
|
||||
description: return max item count, default 10, max 50
|
||||
in: query
|
||||
maximum: 50
|
||||
minimum: 2
|
||||
name: per_page
|
||||
required: true
|
||||
type: integer
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
items:
|
||||
$ref: '#/definitions/model.CDNCluster'
|
||||
type: array
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Get CDNClusters
|
||||
tags:
|
||||
- CDNCluster
|
||||
post:
|
||||
consumes:
|
||||
- application/json
|
||||
description: create by json config
|
||||
parameters:
|
||||
- description: DNCluster
|
||||
in: body
|
||||
name: CDNCluster
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/types.CreateCDNClusterRequest'
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/model.CDNCluster'
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Create CDNCluster
|
||||
tags:
|
||||
- CDNCluster
|
||||
/cdn-clusters/{id}:
|
||||
delete:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Destroy by id
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Destroy CDNCluster
|
||||
tags:
|
||||
- CDNCluster
|
||||
get:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Get CDNCluster by id
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/model.CDNCluster'
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Get CDNCluster
|
||||
tags:
|
||||
- CDNCluster
|
||||
patch:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Update by json config
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: CDNCluster
|
||||
in: body
|
||||
name: CDNCluster
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/types.UpdateCDNClusterRequest'
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/model.CDNCluster'
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Update CDNCluster
|
||||
tags:
|
||||
- CDNCluster
|
||||
/cdn-clusters/{id}/cdns/{cdn_id}:
|
||||
put:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Add CDN to CDNCluster
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: cdn id
|
||||
in: path
|
||||
name: cdn_id
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Add Instance to CDNCluster
|
||||
tags:
|
||||
- CDNCluster
|
||||
/cdn-clusters/{id}/scheduler-clusters/{scheduler_cluster_id}:
|
||||
put:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Add SchedulerCluster to CDNCluster
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: scheduler cluster id
|
||||
in: path
|
||||
name: scheduler_cluster_id
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Add SchedulerCluster to CDNCluster
|
||||
tags:
|
||||
- CDNCluster
|
||||
/cdns:
|
||||
get:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Get CDNs
|
||||
parameters:
|
||||
- default: 0
|
||||
description: current page
|
||||
in: query
|
||||
name: page
|
||||
required: true
|
||||
type: integer
|
||||
- default: 10
|
||||
description: return max item count, default 10, max 50
|
||||
in: query
|
||||
maximum: 50
|
||||
minimum: 2
|
||||
name: per_page
|
||||
required: true
|
||||
type: integer
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
items:
|
||||
$ref: '#/definitions/model.CDN'
|
||||
type: array
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Get CDNs
|
||||
tags:
|
||||
- CDN
|
||||
post:
|
||||
consumes:
|
||||
- application/json
|
||||
description: create by json config
|
||||
parameters:
|
||||
- description: CDN
|
||||
in: body
|
||||
name: CDN
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/types.CreateCDNRequest'
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/model.CDN'
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Create CDN
|
||||
tags:
|
||||
- CDN
|
||||
/cdns/{id}:
|
||||
delete:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Destroy by id
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Destroy CDN
|
||||
tags:
|
||||
- CDN
|
||||
get:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Get CDN by id
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/model.CDN'
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Get CDN
|
||||
tags:
|
||||
- CDN
|
||||
patch:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Update by json config
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: CDN
|
||||
in: body
|
||||
name: CDN
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/types.UpdateCDNRequest'
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/model.CDN'
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Update CDN
|
||||
tags:
|
||||
- CDN
|
||||
/configs:
|
||||
get:
|
||||
consumes:
|
||||
|
|
@ -2704,36 +2205,6 @@ paths:
|
|||
summary: Update SecurityGroup
|
||||
tags:
|
||||
- SecurityGroup
|
||||
/security-groups/{id}/cdn-clusters/{cdn_cluster_id}:
|
||||
put:
|
||||
consumes:
|
||||
- application/json
|
||||
description: Add CDN to SecurityGroup
|
||||
parameters:
|
||||
- description: id
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: cdn cluster id
|
||||
in: path
|
||||
name: cdn_cluster_id
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
"400":
|
||||
description: ""
|
||||
"404":
|
||||
description: ""
|
||||
"500":
|
||||
description: ""
|
||||
summary: Add CDN to SecurityGroup
|
||||
tags:
|
||||
- SecurityGroup
|
||||
/security-groups/{id}/scheduler-clusters/{scheduler_cluster_id}:
|
||||
put:
|
||||
consumes:
|
||||
|
|
|
|||
18	cdn/cdn.go
@@ -37,6 +37,7 @@ import (
 	"d7y.io/dragonfly/v2/cdn/supervisor/task"
 	"d7y.io/dragonfly/v2/client/daemon/upload"
 	logger "d7y.io/dragonfly/v2/internal/dflog"
+	"d7y.io/dragonfly/v2/manager/model"
 	"d7y.io/dragonfly/v2/pkg/rpc/manager"
 	managerClient "d7y.io/dragonfly/v2/pkg/rpc/manager/client"
 	"d7y.io/dragonfly/v2/pkg/util/hostutils"
@@ -158,15 +159,18 @@ func (s *Server) Serve() error {
 	go func() {
 		if s.configServer != nil {
 			var rpcServerConfig = s.grpcServer.GetConfig()
-			CDNInstance, err := s.configServer.UpdateCDN(&manager.UpdateCDNRequest{
-				SourceType:   manager.SourceType_CDN_SOURCE,
+			CDNInstance, err := s.configServer.UpdateSeedPeer(&manager.UpdateSeedPeerRequest{
+				SourceType:        manager.SourceType_SEED_PEER_SOURCE,
 				HostName:     hostutils.FQDNHostname,
+				Type:              model.SeedPeerTypeSuperSeed,
+				IsCdn:             true,
+				Idc:               s.config.Host.IDC,
+				NetTopology:       s.config.Host.NetTopology,
+				Location:          s.config.Host.Location,
 				Ip:           rpcServerConfig.AdvertiseIP,
 				Port:         int32(rpcServerConfig.ListenPort),
 				DownloadPort: int32(rpcServerConfig.DownloadPort),
-				Idc:          s.config.Host.IDC,
-				Location:     s.config.Host.Location,
-				CdnClusterId: uint64(s.config.Manager.CDNClusterID),
+				SeedPeerClusterId: uint64(s.config.Manager.SeedPeerClusterID),
 			})
 			if err != nil {
 				logger.Fatalf("update cdn instance failed: %v", err)
@@ -175,8 +179,8 @@ func (s *Server) Serve() error {
 			logger.Infof("====starting keepalive cdn instance %s to manager %s====", CDNInstance, s.config.Manager.Addr)
 			s.configServer.KeepAlive(s.config.Manager.KeepAlive.Interval, &manager.KeepAliveRequest{
 				HostName:   hostutils.FQDNHostname,
-				SourceType: manager.SourceType_CDN_SOURCE,
-				ClusterId:  uint64(s.config.Manager.CDNClusterID),
+				SourceType: manager.SourceType_SEED_PEER_SOURCE,
+				ClusterId:  uint64(s.config.Manager.SeedPeerClusterID),
 			})
 		}
 	}()
@@ -40,7 +40,7 @@ func New() *Config {
 		CDN: cdn.DefaultConfig(),
 		Manager: ManagerConfig{
 			Addr: "",
-			CDNClusterID: 0,
+			SeedPeerClusterID: 0,
 			KeepAlive: KeepAliveConfig{
 				Interval: 5 * time.Second,
 			},
@@ -93,26 +93,27 @@ type ManagerConfig struct {
 	// NetAddr is manager address.
 	Addr string `yaml:"addr" mapstructure:"addr"`
 
-	// CDNClusterID is cdn cluster id.
-	CDNClusterID uint `yaml:"cdnClusterID" mapstructure:"cdnClusterID"`
+	// SeedPeerClusterID is seed peer cluster id.
+	SeedPeerClusterID uint `yaml:"seedPeerClusterID" mapstructure:"seedPeerClusterID"`
 
-	// KeepAlive configuration
+	// KeepAlive configuration.
 	KeepAlive KeepAliveConfig `yaml:"keepAlive" mapstructure:"keepAlive"`
 }
 
 func (c ManagerConfig) Validate() []error {
 	var errors []error
 	if c.Addr != "" {
-		if c.CDNClusterID <= 0 {
-			errors = append(errors, fmt.Errorf("cdn cluster id %d can't be a negative number", c.CDNClusterID))
+		if c.SeedPeerClusterID <= 0 {
+			errors = append(errors, fmt.Errorf("seed peer cluster id %d can't be a negative number", c.SeedPeerClusterID))
 		}
 
 		errors = append(errors, c.KeepAlive.Validate()...)
 	}
 	return errors
 }
 
 type KeepAliveConfig struct {
-	// Keep alive interval
+	// Keep alive interval.
 	Interval time.Duration `yaml:"interval" mapstructure:"interval"`
 }
 
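For illustration, here is a small test-style sketch of how the renamed field behaves when a manager address is configured. The test function itself is hypothetical; it only exercises the `ManagerConfig.Validate` logic shown in the hunk above and assumes the standard `testing` and `time` packages in the same package as that file.

```go
// Hypothetical test fragment exercising the SeedPeerClusterID check.
func TestManagerConfigValidate_SeedPeerClusterID(t *testing.T) {
	cfg := ManagerConfig{
		Addr:              "127.0.0.1:65003",
		SeedPeerClusterID: 0, // invalid: must be positive when Addr is set
		KeepAlive:         KeepAliveConfig{Interval: 5 * time.Second},
	}
	if errs := cfg.Validate(); len(errs) == 0 {
		t.Fatal("expected a validation error for seed peer cluster id 0")
	}
}
```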
@@ -125,9 +126,12 @@ func (c KeepAliveConfig) Validate() []error {
 }
 
 type HostConfig struct {
-	// Location for scheduler
-	Location string `mapstructure:"location" yaml:"location"`
-
-	// IDC for scheduler
+	// CDN idc.
 	IDC string `mapstructure:"idc" yaml:"idc"`
+
+	// CDN network topology.
+	NetTopology string `mapstructure:"netTopology" yaml:"netTopology"`
+
+	// CDN location.
+	Location string `mapstructure:"location" yaml:"location"`
 }
@ -61,14 +61,15 @@ func TestConfig_Convert(t *testing.T) {
|
|||
WorkHome: "/workHome",
|
||||
Manager: ManagerConfig{
|
||||
Addr: "127.0.0.1:8004",
|
||||
CDNClusterID: 5,
|
||||
SeedPeerClusterID: 5,
|
||||
KeepAlive: KeepAliveConfig{
|
||||
Interval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
Host: HostConfig{
|
||||
Location: "beijing",
|
||||
IDC: "na61",
|
||||
NetTopology: "t1",
|
||||
Location: "beijing",
|
||||
},
|
||||
Metrics: &RestConfig{
|
||||
Addr: ":8081",
|
||||
|
|
@ -156,14 +157,15 @@ func TestConfig_Convert(t *testing.T) {
|
|||
},
|
||||
Manager: ManagerConfig{
|
||||
Addr: "127.0.0.1:8004",
|
||||
CDNClusterID: 5,
|
||||
SeedPeerClusterID: 5,
|
||||
KeepAlive: KeepAliveConfig{
|
||||
Interval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
Host: HostConfig{
|
||||
Location: "beijing",
|
||||
IDC: "na61",
|
||||
NetTopology: "t1",
|
||||
Location: "beijing",
|
||||
},
|
||||
LogDir: "aaa",
|
||||
WorkHome: "/workHome",
|
||||
|
|
|
|||
|
|
@ -85,7 +85,7 @@ func (css *Server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest,
|
|||
// begin piece
|
||||
psc <- &cdnsystem.PieceSeed{
|
||||
PeerId: peerID,
|
||||
HostUuid: hostID,
|
||||
HostId: hostID,
|
||||
PieceInfo: &base.PieceInfo{
|
||||
PieceNum: common.BeginOfPiece,
|
||||
},
|
||||
|
|
@ -106,7 +106,7 @@ func (css *Server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest,
|
|||
for piece := range pieceChan {
|
||||
pieceSeed := &cdnsystem.PieceSeed{
|
||||
PeerId: peerID,
|
||||
HostUuid: hostID,
|
||||
HostId: hostID,
|
||||
PieceInfo: &base.PieceInfo{
|
||||
PieceNum: int32(piece.PieceNum),
|
||||
RangeStart: piece.PieceRange.StartIndex,
|
||||
|
|
@ -148,7 +148,7 @@ func (css *Server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest,
|
|||
}
|
||||
pieceSeed := &cdnsystem.PieceSeed{
|
||||
PeerId: peerID,
|
||||
HostUuid: hostID,
|
||||
HostId: hostID,
|
||||
Done: true,
|
||||
ContentLength: seedTask.SourceFileLength,
|
||||
TotalPieceCount: seedTask.TotalPieceCount,
|
||||
|
|
|
|||
|
|
@@ -183,7 +183,7 @@ func newManagerClient(client managerclient.Client, hostOption HostOption) intern
 
 func (mc *managerClient) Get() (interface{}, error) {
 	schedulers, err := mc.ListSchedulers(&manager.ListSchedulersRequest{
-		SourceType: manager.SourceType_CLIENT_SOURCE,
+		SourceType: manager.SourceType_PEER_SOURCE,
 		HostName:   mc.hostOption.Hostname,
 		Ip:         mc.hostOption.AdvertiseIP,
 		HostInfo: map[string]string{
@ -91,21 +91,6 @@ func (mr *MockClientMockRecorder) ListSchedulers(arg0 interface{}) *gomock.Call
|
|||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockClient)(nil).ListSchedulers), arg0)
|
||||
}
|
||||
|
||||
// UpdateCDN mocks base method.
|
||||
func (m *MockClient) UpdateCDN(arg0 *manager.UpdateCDNRequest) (*manager.CDN, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdateCDN", arg0)
|
||||
ret0, _ := ret[0].(*manager.CDN)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// UpdateCDN indicates an expected call of UpdateCDN.
|
||||
func (mr *MockClientMockRecorder) UpdateCDN(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockClient)(nil).UpdateCDN), arg0)
|
||||
}
|
||||
|
||||
// UpdateScheduler mocks base method.
|
||||
func (m *MockClient) UpdateScheduler(arg0 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
|
|
|||
|
|
@@ -152,34 +152,52 @@ func ConvertPattern(p string, defaultPattern scheduler.Pattern) scheduler.Patter
 }
 
 type SchedulerOption struct {
-	// Manager is to get the scheduler configuration remotely
+	// Manager is to get the scheduler configuration remotely.
 	Manager ManagerOption `mapstructure:"manager" yaml:"manager"`
 	// NetAddrs is scheduler addresses.
 	NetAddrs []dfnet.NetAddr `mapstructure:"netAddrs" yaml:"netAddrs"`
 	// ScheduleTimeout is request timeout.
 	ScheduleTimeout clientutil.Duration `mapstructure:"scheduleTimeout" yaml:"scheduleTimeout"`
-	// DisableAutoBackSource indicates not back source normally, only scheduler says back source
+	// DisableAutoBackSource indicates not back source normally, only scheduler says back source.
 	DisableAutoBackSource bool `mapstructure:"disableAutoBackSource" yaml:"disableAutoBackSource"`
 }
 
 type ManagerOption struct {
-	// Enable get configuration from manager
+	// Enable get configuration from manager.
 	Enable bool `mapstructure:"enable" yaml:"enable"`
 	// NetAddrs is manager addresses.
 	NetAddrs []dfnet.NetAddr `mapstructure:"netAddrs" yaml:"netAddrs"`
-	// RefreshInterval is the refresh interval
+	// RefreshInterval is the refresh interval.
 	RefreshInterval time.Duration `mapstructure:"refreshInterval" yaml:"refreshInterval"`
+	// SeedPeer configuration.
+	SeedPeer SeedPeerOption `mapstructure:"seedPeer" yaml:"seedPeer"`
+}
+
+type SeedPeerOption struct {
+	// Enable seed peer mode.
+	Enable bool `mapstructure:"enable" yaml:"enable"`
+	// Type is seed peer type.
+	Type string `mapstructure:"type" yaml:"type"`
+	// ClusterID is seed peer cluster id.
+	ClusterID uint `mapstructure:"clusterID" yaml:"clusterID"`
+	// KeepAlive configuration.
+	KeepAlive KeepAliveOption `yaml:"keepAlive" mapstructure:"keepAlive"`
+}
+
+type KeepAliveOption struct {
+	// Keep alive interval.
+	Interval time.Duration `yaml:"interval" mapstructure:"interval"`
 }
 
 type HostOption struct {
 	// SecurityDomain is the security domain
 	SecurityDomain string `mapstructure:"securityDomain" yaml:"securityDomain"`
-	// Location for scheduler
-	Location string `mapstructure:"location" yaml:"location"`
 	// IDC for scheduler
 	IDC string `mapstructure:"idc" yaml:"idc"`
 	// Peerhost net topology for scheduler
 	NetTopology string `mapstructure:"netTopology" yaml:"netTopology"`
+	// Location for scheduler
+	Location string `mapstructure:"location" yaml:"location"`
 	// Hostname is daemon host name
 	Hostname string `mapstructure:"hostname" yaml:"hostname"`
 	// The listen ip for all tcp services of daemon
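The new `SeedPeerOption` group mirrors the `seedPeer` block that the test fixture later in this diff adds to the dfdaemon YAML (enable, type, clusterID, keepAlive.interval). As a rough illustration, the fixture values are expected to decode into a Go value like the following; the variable name is hypothetical.

```go
// Illustrative only: the Go value the fixture's seedPeer block
// (enable: false, type: strong, clusterID: 2, keepAlive.interval: 10s)
// is expected to decode into, using the structs defined above.
var fixtureSeedPeer = SeedPeerOption{
	Enable:    false,
	Type:      model.SeedPeerTypeStrongSeed,
	ClusterID: 2,
	KeepAlive: KeepAliveOption{
		Interval: 10 * time.Second,
	},
}
```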
@ -26,6 +26,7 @@ import (
|
|||
"golang.org/x/time/rate"
|
||||
|
||||
"d7y.io/dragonfly/v2/client/clientutil"
|
||||
"d7y.io/dragonfly/v2/manager/model"
|
||||
"d7y.io/dragonfly/v2/pkg/dfnet"
|
||||
"d7y.io/dragonfly/v2/pkg/util/hostutils"
|
||||
"d7y.io/dragonfly/v2/pkg/util/net/iputils"
|
||||
|
|
@ -39,6 +40,14 @@ var peerHostConfig = DaemonOption{
|
|||
Manager: ManagerOption{
|
||||
Enable: false,
|
||||
RefreshInterval: 5 * time.Minute,
|
||||
SeedPeer: SeedPeerOption{
|
||||
Enable: false,
|
||||
Type: model.SeedPeerTypeSuperSeed,
|
||||
ClusterID: 1,
|
||||
KeepAlive: KeepAliveOption{
|
||||
Interval: 5 * time.Second,
|
||||
},
|
||||
},
|
||||
},
|
||||
NetAddrs: []dfnet.NetAddr{
|
||||
{
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import (
|
|||
"golang.org/x/time/rate"
|
||||
|
||||
"d7y.io/dragonfly/v2/client/clientutil"
|
||||
"d7y.io/dragonfly/v2/manager/model"
|
||||
"d7y.io/dragonfly/v2/pkg/dfnet"
|
||||
"d7y.io/dragonfly/v2/pkg/util/hostutils"
|
||||
"d7y.io/dragonfly/v2/pkg/util/net/iputils"
|
||||
|
|
@ -39,6 +40,14 @@ var peerHostConfig = DaemonOption{
|
|||
Manager: ManagerOption{
|
||||
Enable: false,
|
||||
RefreshInterval: 5 * time.Minute,
|
||||
SeedPeer: SeedPeerOption{
|
||||
Enable: false,
|
||||
Type: model.SeedPeerTypeSuperSeed,
|
||||
ClusterID: 1,
|
||||
KeepAlive: KeepAliveOption{
|
||||
Interval: 5 * time.Second,
|
||||
},
|
||||
},
|
||||
},
|
||||
NetAddrs: []dfnet.NetAddr{
|
||||
{
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import (
|
|||
"gopkg.in/yaml.v3"
|
||||
|
||||
"d7y.io/dragonfly/v2/client/clientutil"
|
||||
"d7y.io/dragonfly/v2/manager/model"
|
||||
"d7y.io/dragonfly/v2/pkg/dfnet"
|
||||
"d7y.io/dragonfly/v2/pkg/unit"
|
||||
)
|
||||
|
|
@ -239,6 +240,14 @@ func TestPeerHostOption_Load(t *testing.T) {
|
|||
},
|
||||
},
|
||||
RefreshInterval: 5 * time.Minute,
|
||||
SeedPeer: SeedPeerOption{
|
||||
Enable: false,
|
||||
Type: model.SeedPeerTypeStrongSeed,
|
||||
ClusterID: 2,
|
||||
KeepAlive: KeepAliveOption{
|
||||
Interval: 10 * time.Second,
|
||||
},
|
||||
},
|
||||
},
|
||||
NetAddrs: []dfnet.NetAddr{
|
||||
{
|
||||
|
|
|
|||
|
|
@@ -12,6 +12,12 @@ scheduler:
       - type: tcp
         addr: 127.0.0.1:65003
     refreshInterval: 5m
+    seedPeer:
+      enable: false
+      type: strong
+      clusterID: 2
+      keepAlive:
+        interval: 10s
   netAddrs:
     - type: tcp
       addr: 127.0.0.1:8002
@@ -89,6 +89,7 @@ type clientDaemon struct {
 	dynconfig       config.Dynconfig
 	dfpath          dfpath.Dfpath
 	schedulers      []*manager.Scheduler
+	managerClient   managerclient.Client
 	schedulerClient schedulerclient.SchedulerClient
 }
 
@@ -97,7 +98,7 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
 	source.UpdatePluginDir(d.PluginDir())
 
 	host := &scheduler.PeerHost{
-		Uuid:     idgen.UUIDString(),
+		Id:       idgen.HostID(opt.Host.Hostname, int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start)),
 		Ip:       opt.Host.AdvertiseIP,
 		RpcPort:  int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
 		DownPort: 0,
@@ -112,23 +113,27 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
 		addrs          []dfnet.NetAddr
 		schedulers     []*manager.Scheduler
 		dynconfig      config.Dynconfig
+		managerClient  managerclient.Client
 		defaultPattern = config.ConvertPattern(opt.Download.DefaultPattern, scheduler.Pattern_P2P)
 	)
 
 	if opt.Scheduler.Manager.Enable == true {
 		// New manager client
-		managerClient, err := managerclient.NewWithAddrs(opt.Scheduler.Manager.NetAddrs)
+		var err error
+		managerClient, err = managerclient.NewWithAddrs(opt.Scheduler.Manager.NetAddrs)
 		if err != nil {
 			return nil, err
 		}
 
 		// New dynconfig client
-		if dynconfig, err = config.NewDynconfig(managerClient, d.CacheDir(), opt.Host, opt.Scheduler.Manager.RefreshInterval); err != nil {
+		dynconfig, err = config.NewDynconfig(managerClient, d.CacheDir(), opt.Host, opt.Scheduler.Manager.RefreshInterval)
+		if err != nil {
 			return nil, err
 		}
 
 		// Get schedulers from manager
-		if schedulers, err = dynconfig.GetSchedulers(); err != nil {
+		schedulers, err = dynconfig.GetSchedulers()
+		if err != nil {
 			return nil, err
 		}
 
@@ -229,6 +234,7 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
 		dynconfig:       dynconfig,
 		dfpath:          d,
 		schedulers:      schedulers,
+		managerClient:   managerClient,
 		schedulerClient: sched,
 	}, nil
 }
@@ -452,6 +458,24 @@ func (cd *clientDaemon) Serve() error {
 		return nil
 	})
 
+	// enable seed peer mode
+	if cd.managerClient != nil && cd.Option.Scheduler.Manager.SeedPeer.Enable {
+		logger.Info("announce to manager")
+		if err := cd.announceSeedPeer(); err != nil {
+			return err
+		}
+
+		g.Go(func() error {
+			logger.Info("keepalive to manager")
+			cd.managerClient.KeepAlive(cd.Option.Scheduler.Manager.SeedPeer.KeepAlive.Interval, &manager.KeepAliveRequest{
+				SourceType: manager.SourceType_SEED_PEER_SOURCE,
+				HostName:   cd.Option.Host.Hostname,
+				ClusterId:  uint64(cd.Option.Scheduler.Manager.SeedPeer.ClusterID),
+			})
+			return err
+		})
+	}
+
 	if cd.Option.AliveTime.Duration > 0 {
 		g.Go(func() error {
 			for {
@@ -565,6 +589,13 @@ func (cd *clientDaemon) Stop() {
 			}
 			logger.Info("dynconfig client closed")
 		}
+
+		if cd.managerClient != nil {
+			if err := cd.managerClient.Close(); err != nil {
+				logger.Errorf("manager client failed to stop: %s", err.Error())
+			}
+			logger.Info("manager client closed")
+		}
 	})
 }
 
@@ -586,7 +617,7 @@ func (cd *clientDaemon) OnNotify(data *config.DynconfigData) {
 	logger.Infof("scheduler addresses have been updated: %#v", addrs)
 }
 
-// getSchedulerIPs get ips by schedulers.
+// getSchedulerIPs gets ips by schedulers.
 func getSchedulerIPs(schedulers []*manager.Scheduler) []string {
 	ips := []string{}
 	for _, scheduler := range schedulers {
@@ -635,6 +666,26 @@ func schedulersToAvailableNetAddrs(schedulers []*manager.Scheduler) []dfnet.NetA
 	return netAddrs
 }
 
+// announceSeedPeer announces seed peer to manager.
+func (cd *clientDaemon) announceSeedPeer() error {
+	if _, err := cd.managerClient.UpdateSeedPeer(&manager.UpdateSeedPeerRequest{
+		SourceType:        manager.SourceType_SEED_PEER_SOURCE,
+		HostName:          cd.Option.Host.Hostname,
+		Type:              cd.Option.Scheduler.Manager.SeedPeer.Type,
+		Idc:               cd.Option.Host.IDC,
+		NetTopology:       cd.Option.Host.NetTopology,
+		Location:          cd.Option.Host.Location,
+		Ip:                cd.Option.Host.AdvertiseIP,
+		Port:              cd.schedPeerHost.RpcPort,
+		DownloadPort:      cd.schedPeerHost.DownPort,
+		SeedPeerClusterId: uint64(cd.Option.Scheduler.Manager.SeedPeer.ClusterID),
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func (cd *clientDaemon) ExportTaskManager() peer.TaskManager {
 	return cd.PeerTaskManager
 }
@ -171,7 +171,7 @@ func (ptm *peerTaskManager) newPeerTaskConductor(
|
|||
// use a new context with span info
|
||||
ctx = trace.ContextWithSpan(context.Background(), trace.SpanFromContext(ctx))
|
||||
ctx, span := tracer.Start(ctx, config.SpanPeerTask, trace.WithSpanKind(trace.SpanKindClient))
|
||||
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Uuid))
|
||||
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Id))
|
||||
span.SetAttributes(semconv.NetHostIPKey.String(ptm.host.Ip))
|
||||
span.SetAttributes(config.AttributePeerID.String(request.PeerId))
|
||||
span.SetAttributes(semconv.HTTPURLKey.String(request.Url))
|
||||
|
|
|
|||
|
|
@ -88,7 +88,7 @@ func (ptm *peerTaskManager) tryReuseFilePeerTask(ctx context.Context,
|
|||
}
|
||||
|
||||
_, span := tracer.Start(ctx, config.SpanReusePeerTask, trace.WithSpanKind(trace.SpanKindClient))
|
||||
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Uuid))
|
||||
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Id))
|
||||
span.SetAttributes(semconv.NetHostIPKey.String(ptm.host.Ip))
|
||||
span.SetAttributes(config.AttributeTaskID.String(taskID))
|
||||
span.SetAttributes(config.AttributePeerID.String(request.PeerId))
|
||||
|
|
@ -220,7 +220,7 @@ func (ptm *peerTaskManager) tryReuseStreamPeerTask(ctx context.Context,
|
|||
}
|
||||
|
||||
ctx, span := tracer.Start(ctx, config.SpanStreamTask, trace.WithSpanKind(trace.SpanKindClient))
|
||||
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Uuid))
|
||||
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Id))
|
||||
span.SetAttributes(semconv.NetHostIPKey.String(ptm.host.Ip))
|
||||
span.SetAttributes(config.AttributeTaskID.String(taskID))
|
||||
span.SetAttributes(config.AttributePeerID.String(request.PeerID))
|
||||
|
|
@ -280,9 +280,11 @@ func (ptm *peerTaskManager) tryReuseSeedPeerTask(ctx context.Context,
|
|||
}
|
||||
|
||||
if reuse == nil {
|
||||
if request.Range == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// if request.Range == nil {
|
||||
// return nil, false
|
||||
// }
|
||||
// TODO, mock SeedTaskResponse for sub task
|
||||
// for ranged request, check the parent task
|
||||
//reuseRange = request.Range
|
||||
|
|
@ -304,7 +306,7 @@ func (ptm *peerTaskManager) tryReuseSeedPeerTask(ctx context.Context,
|
|||
}
|
||||
|
||||
ctx, span := tracer.Start(ctx, config.SpanReusePeerTask, trace.WithSpanKind(trace.SpanKindClient))
|
||||
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Uuid))
|
||||
span.SetAttributes(config.AttributePeerHost.String(ptm.host.Id))
|
||||
span.SetAttributes(semconv.NetHostIPKey.String(ptm.host.Ip))
|
||||
span.SetAttributes(config.AttributeTaskID.String(taskID))
|
||||
span.SetAttributes(config.AttributePeerID.String(request.PeerId))
|
||||
|
|
|
|||
|
|
@ -267,7 +267,7 @@ func (proxy *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
defer metrics.ProxyRequestRunningCount.WithLabelValues(r.Method).Sub(1)
|
||||
|
||||
ctx, span := proxy.tracer.Start(r.Context(), config.SpanProxy)
|
||||
span.SetAttributes(config.AttributePeerHost.String(proxy.peerHost.Uuid))
|
||||
span.SetAttributes(config.AttributePeerHost.String(proxy.peerHost.Id))
|
||||
span.SetAttributes(semconv.NetHostIPKey.String(proxy.peerHost.Ip))
|
||||
span.SetAttributes(semconv.HTTPSchemeKey.String(r.URL.Scheme))
|
||||
span.SetAttributes(semconv.HTTPHostKey.String(r.Host))
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdn
|
|||
PeerTaskRequest: scheduler.PeerTaskRequest{
|
||||
Url: seedRequest.Url,
|
||||
UrlMeta: seedRequest.UrlMeta,
|
||||
PeerId: idgen.PeerID(s.server.peerHost.Ip),
|
||||
PeerId: idgen.SeedPeerID(s.server.peerHost.Ip),
|
||||
PeerHost: s.server.peerHost,
|
||||
HostLoad: nil,
|
||||
IsMigrating: false,
|
||||
|
|
@ -95,7 +95,7 @@ func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdn
|
|||
err = seedsServer.Send(
|
||||
&cdnsystem.PieceSeed{
|
||||
PeerId: req.PeerId,
|
||||
HostUuid: req.PeerHost.Uuid,
|
||||
HostId: req.PeerHost.Id,
|
||||
PieceInfo: &base.PieceInfo{
|
||||
PieceNum: common.BeginOfPiece,
|
||||
},
|
||||
|
|
@ -249,7 +249,7 @@ func (s *seedSynchronizer) sendOrderedPieceSeeds(desired, orderedNum int32, fini
|
|||
func (s *seedSynchronizer) compositePieceSeed(pp *base.PiecePacket, piece *base.PieceInfo) cdnsystem.PieceSeed {
|
||||
return cdnsystem.PieceSeed{
|
||||
PeerId: s.seedTaskRequest.PeerId,
|
||||
HostUuid: s.seedTaskRequest.PeerHost.Uuid,
|
||||
HostId: s.seedTaskRequest.PeerHost.Id,
|
||||
PieceInfo: &base.PieceInfo{
|
||||
PieceNum: piece.PieceNum,
|
||||
RangeStart: piece.RangeStart,
|
||||
|
|
|
|||
|
|
@ -369,12 +369,11 @@ func setupSeederServerAndClient(t *testing.T, srv *server, sd *seeder, assert *t
|
|||
}
|
||||
}()
|
||||
|
||||
client, err := cdnclient.GetClientByAddr([]dfnet.NetAddr{
|
||||
client := cdnclient.GetClientByAddr([]dfnet.NetAddr{
|
||||
{
|
||||
Type: dfnet.TCP,
|
||||
Addr: fmt.Sprintf(":%d", port),
|
||||
},
|
||||
})
|
||||
assert.Nil(err, "grpc dial should be ok")
|
||||
return port, client
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,4 @@
-cdn.json
-cdn.yaml
 dfget.yaml
 manager.yaml
 scheduler.yaml
-
+seed-peer.yaml
@ -1,51 +0,0 @@
|
|||
worker_rlimit_nofile 100000;
|
||||
|
||||
events {
|
||||
use epoll;
|
||||
worker_connections 20480;
|
||||
}
|
||||
|
||||
http {
|
||||
include mime.types;
|
||||
default_type application/octet-stream;
|
||||
root /home/admin/cai/htdocs;
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
|
||||
server_tokens off;
|
||||
keepalive_timeout 5;
|
||||
|
||||
client_header_timeout 1m;
|
||||
send_timeout 1m;
|
||||
client_max_body_size 3m;
|
||||
|
||||
index index.html index.htm;
|
||||
access_log off;
|
||||
log_not_found off;
|
||||
|
||||
gzip on;
|
||||
gzip_http_version 1.0;
|
||||
gzip_comp_level 6;
|
||||
gzip_min_length 1024;
|
||||
gzip_proxied any;
|
||||
gzip_vary on;
|
||||
gzip_disable msie6;
|
||||
gzip_buffers 96 8k;
|
||||
gzip_types text/xml text/plain text/css application/javascript application/x-javascript application/rss+xml application/json;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Web-Server-Type nginx;
|
||||
proxy_set_header WL-Proxy-Client-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_redirect off;
|
||||
proxy_buffers 128 8k;
|
||||
proxy_intercept_errors on;
|
||||
|
||||
server {
|
||||
listen 8001;
|
||||
location / {
|
||||
root /tmp/cdn;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -28,7 +28,7 @@ services:
       retries: 30
 
   manager:
-    image: dragonflyoss/manager:v2.0.2
+    image: dragonflyoss/manager:v2.0.3-beta.1
     container_name: manager
     network_mode: host
     depends_on:
@@ -45,10 +45,11 @@ services:
       retries: 30
 
   dfdaemon:
-    image: dragonflyoss/dfdaemon:v2.0.2
+    image: dragonflyoss/dfdaemon:v2.0.3-beta.1
    depends_on:
+      - manager
      - scheduler
-      - cdn
+      - seed-peer
    container_name: dfdaemon
    network_mode: host
    restart: always
@@ -62,9 +63,8 @@ services:
       - ./config/dfget.yaml:/etc/dragonfly/dfget.yaml:ro
 
   scheduler:
-    image: dragonflyoss/scheduler:v2.0.2
+    image: dragonflyoss/scheduler:v2.0.3-beta.1
     depends_on:
-      - cdn
       - manager
     container_name: scheduler
     network_mode: host
@@ -78,17 +78,19 @@ services:
       - /tmp/log/dragonfly:/var/log/dragonfly
       - ./config/scheduler.yaml:/etc/dragonfly/scheduler.yaml:ro
 
-  cdn:
-    image: dragonflyoss/cdn:v2.0.2
-    container_name: cdn
+  seed-peer:
+    image: dragonflyoss/dfdaemon:v2.0.3-beta.1
+    depends_on:
+      - manager
+      - scheduler
+    container_name: seed-peer
     network_mode: host
     restart: always
     healthcheck:
-      test: ["CMD-SHELL", "/bin/grpc_health_probe -addr=:8003 || exit 1"]
+      test: ["CMD-SHELL", "/bin/grpc_health_probe -addr=:65100 || exit 1"]
       interval: 1s
       timeout: 3s
       retries: 30
     volumes:
       - /tmp/log/dragonfly:/var/log/dragonfly
-      - ./config/cdn.yaml:/etc/dragonfly/cdn.yaml:ro
-      - ./config/nginx.conf:/etc/nginx/nginx.conf:ro
+      - ./config/seed-peer.yaml:/etc/dragonfly/dfget.yaml:ro
@@ -14,7 +14,7 @@ prepare(){
    ip=${IP:-$(hostname -i)}
 
    sed "s,__IP__,$ip," template/dfget.template.yaml > config/dfget.yaml
-   sed "s,__IP__,$ip," template/cdn.template.yaml > config/cdn.yaml
+   sed "s,__IP__,$ip," template/seed-peer.template.yaml > config/seed-peer.yaml
    sed "s,__IP__,$ip," template/scheduler.template.yaml > config/scheduler.yaml
    sed "s,__IP__,$ip," template/manager.template.yaml > config/manager.yaml
 }
@@ -24,20 +24,19 @@ run_container(){
    echo use container runtime: ${RUNTIME}
 
    echo try to clean old containers
-   ${RUNTIME} rm -f dragonfly-cdn dragonfly-scheduler dragonfly-dfdaemon
+   ${RUNTIME} rm -f dragonfly-manager dragonfly-scheduler dragonfly-dfdaemon
 
    printf "create dragonfly-manager "
-   ${RUNTIME} run -d --name dragonfly-cdn --net=host \
+   ${RUNTIME} run -d --name dragonfly-manager --net=host \
        -v /tmp/log/dragonfly:/var/log/dragonfly \
        -v ${DIR}/config/manager.yaml:/etc/dragonfly/manager.yaml \
        ${REPO}/manager:${TAG}
 
-   printf "create dragonfly-cdn "
-   ${RUNTIME} run -d --name dragonfly-cdn --net=host \
+   printf "create dragonfly-seed-peer "
+   ${RUNTIME} run -d --name dragonfly-seed-peer --net=host \
        -v /tmp/log/dragonfly:/var/log/dragonfly \
-       -v ${DIR}/config/cdn.yaml:/etc/dragonfly/cdn.yaml \
-       -v ${DIR}/config/nginx.conf:/etc/nginx/nginx.conf \
-       ${REPO}/cdn:${TAG}
+       -v ${DIR}/config/seed-peer.yaml:/etc/dragonfly/dfget.yaml \
+       ${REPO}/dfdaemon:${TAG}
 
    printf "create dragonfly-scheduler "
    ${RUNTIME} run -d --name dragonfly-scheduler --net=host \
@ -1,129 +0,0 @@
|
|||
# This file is the template of cdn system configuration file.
|
||||
# You can configure your cdn system by change the parameter according your requirement.
|
||||
---
|
||||
base:
|
||||
# listenPort is the port cdn server listens on.
|
||||
# default: 8003
|
||||
listenPort: 8003
|
||||
|
||||
# DownloadPort is the port for download files from cdn.
|
||||
# And you should start a file server firstly which listens on the download port.
|
||||
# default: 8001
|
||||
downloadPort: 8001
|
||||
|
||||
# SystemReservedBandwidth is the network bandwidth reserved for system software.
|
||||
# default: 20 MB, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
|
||||
systemReservedBandwidth: 20M
|
||||
|
||||
# MaxBandwidth is the network bandwidth that cdn can use.
|
||||
# default: 1G, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
|
||||
maxBandwidth: 1G
|
||||
|
||||
# AdvertiseIP is used to set the ip that we advertise to other peer in the p2p-network.
|
||||
# By default, the first non-loop address is advertised.
|
||||
advertiseIP: __IP__
|
||||
|
||||
# FailAccessInterval is the interval time after failed to access the URL.
|
||||
# If a task failed to be downloaded from the source, it will not be retried in the time since the last failure.
|
||||
# default: 3m
|
||||
failAccessInterval: 3m
|
||||
|
||||
# GCInitialDelay is the delay time from the start to the first GC execution.
|
||||
# default: 6s
|
||||
gcInitialDelay: 6s
|
||||
|
||||
# GCMetaInterval is the interval time to execute GC meta.
|
||||
# default: 2m0s
|
||||
gcMetaInterval: 2m
|
||||
|
||||
# TaskExpireTime when a task is not accessed within the taskExpireTime,
|
||||
# and it will be treated to be expired.
|
||||
# default: 3m0s
|
||||
taskExpireTime: 3m
|
||||
|
||||
# storageMode is the Mode of storage policy, [disk/hybrid]
|
||||
storageMode: disk
|
||||
|
||||
|
||||
# logDir is the log storage directory
|
||||
# in linux, default value is /var/log/dragonfly
|
||||
# in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
|
||||
logDir: ""
|
||||
|
||||
# manager configuration
|
||||
manager:
|
||||
addr: "__IP__:65003"
|
||||
cdnClusterID: "1"
|
||||
keepAlive:
|
||||
interval: 5s
|
||||
|
||||
# host configuration
|
||||
host:
|
||||
location:
|
||||
idc:
|
||||
|
||||
# enable prometheus metrics
|
||||
# metrics:
|
||||
# # metrics service address
|
||||
# addr: ":8000"
|
||||
|
||||
|
||||
plugins:
|
||||
storageDriver:
|
||||
- name: disk
|
||||
enable: true
|
||||
config:
|
||||
baseDir: /tmp/cdn
|
||||
- name: memory
|
||||
enable: false
|
||||
config:
|
||||
baseDir: /dev/shm/dragonfly
|
||||
|
||||
storagemanager:
|
||||
- name: disk
|
||||
enable: true
|
||||
config:
|
||||
gcInitialDelay: 0s
|
||||
gcInterval: 15s
|
||||
driverConfigs:
|
||||
disk:
|
||||
gcConfig:
|
||||
youngGCThreshold: 100.0GB
|
||||
fullGCThreshold: 5.0GB
|
||||
cleanRatio: 1
|
||||
intervalThreshold: 2h0m0s
|
||||
- name: hybrid
|
||||
enable: false
|
||||
config:
|
||||
gcInitialDelay: 0s
|
||||
gcInterval: 15s
|
||||
driverConfigs:
|
||||
disk:
|
||||
gcConfig:
|
||||
youngGCThreshold: 100.0GB
|
||||
fullGCThreshold: 5.0GB
|
||||
cleanRatio: 1
|
||||
intervalThreshold: 2h0m0s
|
||||
memory:
|
||||
gcConfig:
|
||||
youngGCThreshold: 100.0GB
|
||||
fullGCThreshold: 5.0GB
|
||||
cleanRatio: 3
|
||||
intervalThreshold: 2h0m0s
|
||||
|
||||
# console shows log on console
|
||||
console: false
|
||||
|
||||
# whether to enable debug level logger and enable pprof
|
||||
verbose: true
|
||||
|
||||
# listen port for pprof, only valid when the verbose option is true
|
||||
# default is -1. If it is 0, pprof will use a random port.
|
||||
pprof-port: -1
|
||||
|
||||
# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
|
||||
jaeger: ""
|
||||
|
||||
# service name used in tracer
|
||||
# default: dragonfly-cdn
|
||||
service-name: dragonfly-cdn
|
||||
|
|
@@ -26,7 +26,7 @@ scheduler:
   # the dragonfly working directory plugins
   algorithm: default
   # backSourceCount is the number of backsource clients
-  # when the CDN is unavailable
+  # when the seed peer is unavailable
   backSourceCount: 3
   # retry scheduling back-to-source limit times
   retryBackSourceLimit: 5
@@ -74,13 +74,6 @@ manager:
     # interval
     interval: 5s
 
-# cdn configuration
-cdn:
-  # scheduler enable cdn as P2P peer,
-  # if the value is false, P2P network will not be back-to-source through
-  # cdn but by dfdaemon and preheat feature does not work
-  enable: true
-
 # machinery async job configuration,
 # see https://github.com/RichardKnop/machinery
 job:
@ -0,0 +1,180 @@
|
|||
# daemon alive time, when sets 0s, daemon will not auto exit
|
||||
# it is useful for longtime running
|
||||
aliveTime: 0s
|
||||
|
||||
# daemon gc task running interval
|
||||
gcInterval: 1m0s
|
||||
|
||||
# daemon work directory, daemon will change current working directory to this
|
||||
# in linux, default value is /usr/local/dragonfly
|
||||
# in macos(just for testing), default value is /Users/$USER/.dragonfly
|
||||
workHome: ""
|
||||
|
||||
# cacheDir is dynconfig cache storage directory
|
||||
# in linux, default value is /var/cache/dragonfly
|
||||
# in macos(just for testing), default value is /Users/$USER/.dragonfly/cache
|
||||
cacheDir: ""
|
||||
|
||||
# logDir is the log storage directory
|
||||
# in linux, default value is /var/log/dragonfly
|
||||
# in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
|
||||
logDir: ""
|
||||
|
||||
# dataDir is the download data storage directory
|
||||
# in linux, default value is /var/lib/dragonfly
|
||||
# in macos(just for testing), default value is /Users/$USER/.dragonfly/data
|
||||
dataDir: ""
|
||||
|
||||
# when daemon exit, keep peer task data or not
|
||||
# it is usefully when upgrade daemon service, all local cache will be saved
|
||||
# default is false
|
||||
keepStorage: true
|
||||
|
||||
# console shows log on console
|
||||
console: false
|
||||
|
||||
# whether to enable debug level logger and enable pprof
|
||||
verbose: true
|
||||
|
||||
# listen port for pprof, only valid when the verbose option is true
|
||||
# default is -1. If it is 0, pprof will use a random port.
|
||||
pprof-port: -1
|
||||
|
||||
# jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces
|
||||
jaeger: ""
|
||||
|
||||
# all addresses of all schedulers
|
||||
# the schedulers of all daemons should be same in one region or zone.
|
||||
# daemon will send tasks to a fixed scheduler by hashing the task url and meta data
|
||||
# caution: only tcp is supported
|
||||
scheduler:
|
||||
manager:
|
||||
# get scheduler list dynamically from manager
|
||||
enable: true
|
||||
# manager service addresses
|
||||
netAddrs:
|
||||
- type: tcp
|
||||
addr: __IP__:65003
|
||||
# scheduler list refresh interval
|
||||
refreshInterval: 10s
|
||||
seedPeer:
|
||||
enable: true
|
||||
type: "super"
|
||||
clusterID: 1
|
||||
# schedule timeout
|
||||
scheduleTimeout: 30s
|
||||
# when true, the daemon backs to source only when the scheduler tells it to
|
||||
disableAutoBackSource: false
|
||||
# the example below is a statically configured scheduler address
|
||||
netAddrs:
|
||||
- type: tcp
|
||||
addr: __IP__:8002
|
||||
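The comment above says the daemon pins every task to a fixed scheduler by hashing the task url and metadata. A minimal sketch of that idea in Go, assuming a plain FNV hash over the task identity and a static address list; the hash, field names and placeholder addresses here are illustrative and not dfdaemon's exact implementation, which works against the dynamically refreshed list from the manager:

package main

import (
	"fmt"
	"hash/fnv"
)

// pickScheduler maps a task onto one scheduler address deterministically,
// so retries of the same task always land on the same scheduler.
func pickScheduler(taskURL, tag string, schedulers []string) string {
	h := fnv.New32a()
	h.Write([]byte(taskURL + "|" + tag)) // task url plus metadata
	return schedulers[h.Sum32()%uint32(len(schedulers))]
}

func main() {
	schedulers := []string{"__IP__:8002", "__IP2__:8002"}
	fmt.Println(pickScheduler("https://example.com/blobs/foo", "default", schedulers))
}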
|
||||
# current host info used for scheduler
|
||||
host:
|
||||
# tcp service listen address
|
||||
# port should be set by other options
|
||||
listenIP: 0.0.0.0
|
||||
# access ip for other peers
|
||||
# when the local ip differs from the access ip, advertiseIP should be set
|
||||
advertiseIP: __IP__
|
||||
# geographical location, separated by "|" characters
|
||||
location: ""
|
||||
# idc deployed by daemon
|
||||
idc: ""
|
||||
# security domain deployed by daemon, network isolation between different security domains
|
||||
securityDomain: ""
|
||||
# network topology, separated by "|" characters
|
||||
netTopology: ""
|
||||
# daemon hostname
|
||||
# hostname: ""
|
||||
|
||||
# download service option
|
||||
download:
|
||||
# calculate digest when transferring files, set to false to save memory
|
||||
calculateDigest: true
|
||||
# total download limit per second
|
||||
totalRateLimit: 2048Mi
|
||||
# per peer task download limit per second
|
||||
perPeerRateLimit: 1024Mi
|
||||
# download piece timeout
|
||||
pieceDownloadTimeout: 30s
|
||||
# golang transport option
|
||||
transportOption:
|
||||
# dial timeout
|
||||
dialTimeout: 2s
|
||||
# keep alive
|
||||
keepAlive: 30s
|
||||
# same with http.Transport.MaxIdleConns
|
||||
maxIdleConns: 100
|
||||
# same with http.Transport.IdleConnTimeout
|
||||
idleConnTimeout: 90s
|
||||
# same with http.Transport.ResponseHeaderTimeout
|
||||
responseHeaderTimeout: 2s
|
||||
# same with http.Transport.TLSHandshakeTimeout
|
||||
tlsHandshakeTimeout: 1s
|
||||
# same with http.Transport.ExpectContinueTimeout
|
||||
expectContinueTimeout: 2s
|
||||
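The transportOption keys above are documented as mirrors of Go's net/http settings. A sketch of the equivalent standard-library construction with the example values from this config; dialTimeout and keepAlive are assumed to feed the net.Dialer behind DialContext, while the remaining entries are http.Transport fields of the same name:

package transportexample

import (
	"net"
	"net/http"
	"time"
)

// newTransport wires the example transportOption values into an http.Transport.
func newTransport() *http.Transport {
	return &http.Transport{
		DialContext: (&net.Dialer{
			Timeout:   2 * time.Second,  // dialTimeout
			KeepAlive: 30 * time.Second, // keepAlive
		}).DialContext,
		MaxIdleConns:          100,              // maxIdleConns
		IdleConnTimeout:       90 * time.Second, // idleConnTimeout
		ResponseHeaderTimeout: 2 * time.Second,  // responseHeaderTimeout
		TLSHandshakeTimeout:   1 * time.Second,  // tlsHandshakeTimeout
		ExpectContinueTimeout: 2 * time.Second,  // expectContinueTimeout
	}
}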
# download grpc option
|
||||
downloadGRPC:
|
||||
# security option
|
||||
security:
|
||||
insecure: true
|
||||
# download service listen address
|
||||
# currently, only unix domain socket is supported
|
||||
unixListen:
|
||||
# in linux, default value is /var/run/dfdaemon.sock
|
||||
# in macos(just for testing), default value is /tmp/dfdaemon.sock
|
||||
socket: /var/run/dfdaemon.sock
|
||||
# peer grpc option
|
||||
# peer grpc service sends piece info to other peers
|
||||
peerGRPC:
|
||||
security:
|
||||
insecure: true
|
||||
tcpListen:
|
||||
# listen address
|
||||
listen: 0.0.0.0
|
||||
# listen port, daemon will try to listen
|
||||
# when this port is not available, daemon will try next port
|
||||
port: 65100
|
||||
# if you want to limit the upper port, please use the format below
|
||||
# port:
|
||||
# start: 65000
|
||||
# end: 65009
|
||||
|
||||
# upload service option
|
||||
upload:
|
||||
# upload limit per second
|
||||
rateLimit: 2048Mi
|
||||
security:
|
||||
insecure: true
|
||||
tcpListen:
|
||||
# listen address
|
||||
listen: 0.0.0.0
|
||||
# listen port, daemon will try to listen
|
||||
# when this port is not available, daemon will try next port
|
||||
port: 65102
|
||||
# if you want to limit the upper port, please use the format below
|
||||
# port:
|
||||
# start: 65020
|
||||
# end: 65029
|
||||
|
||||
# peer task storage option
|
||||
storage:
|
||||
# task data expire time
|
||||
# when a task's data has not been accessed within this time, the task will be garbage collected.
|
||||
taskExpireTime: 6h
|
||||
# storage strategy when processing task data
|
||||
# io.d7y.storage.v2.simple : download the file to the data directory first, then copy it to the output path, this is the default action
|
||||
# the downloaded file in the data directory will be the peer data for uploading to other peers
|
||||
# io.d7y.storage.v2.advance: download the file directly to the output path with a postfix, hard link to the final output,
|
||||
# avoids the copy to the output path, faster than the simple strategy, but:
|
||||
# the output file with the postfix will be the peer data for uploading to other peers
|
||||
# when the user deletes or changes this file, the peer data will be corrupted
|
||||
# default is io.d7y.storage.v2.advance
|
||||
strategy: io.d7y.storage.v2.advance
|
||||
# disk usage percent gc threshold, when the disk usage percent exceeds it, the oldest tasks will be reclaimed.
|
||||
# eg, diskGCThresholdPercent=90, when the disk usage is above 90%, start to gc the oldest tasks
|
||||
diskGCThresholdPercent: 90
|
||||
# set to true to reuse the underlying storage for the same task id
|
||||
multiplex: true
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit bff751a963c4c278c35dfb240eb7cb0ca52eef38
|
||||
Subproject commit 34cf932be50b23ccb4f0f6cb501598dc47a06d7f
|
||||
|
|
@ -20,7 +20,6 @@ package job
|
|||
const (
|
||||
GlobalQueue = Queue("global")
|
||||
SchedulersQueue = Queue("schedulers")
|
||||
CDNsQueue = Queue("cdns")
|
||||
)
|
||||
|
||||
// Job Name
|
||||
|
|
|
|||
|
|
@ -36,18 +36,6 @@ func GetSchedulerQueue(clusterID uint, hostname string) (Queue, error) {
|
|||
return Queue(fmt.Sprintf("scheduler_%d_%s", clusterID, hostname)), nil
|
||||
}
|
||||
|
||||
func GetCDNQueue(clusterID uint, hostname string) (Queue, error) {
|
||||
if clusterID == 0 {
|
||||
return Queue(""), errors.New("empty cluster id config is not specified")
|
||||
}
|
||||
|
||||
if hostname == "" {
|
||||
return Queue(""), errors.New("empty hostname config is not specified")
|
||||
}
|
||||
|
||||
return Queue(fmt.Sprintf("cdn_%d_%s", clusterID, hostname)), nil
|
||||
}
|
||||
|
||||
func (q Queue) String() string {
|
||||
return string(q)
|
||||
}
|
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestJobGetCDNQueue(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
clusterID uint
|
||||
hostname string
|
||||
expect func(t *testing.T, result Queue, err error)
|
||||
}{
|
||||
{
|
||||
name: "get cdn queue",
|
||||
clusterID: 1,
|
||||
hostname: "foo",
|
||||
expect: func(t *testing.T, result Queue, err error) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(Queue("cdn_1_foo"), result)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get cdn queue with empty hostname",
|
||||
clusterID: 1,
|
||||
hostname: "",
|
||||
expect: func(t *testing.T, result Queue, err error) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualError(err, "empty hostname config is not specified")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get scheduler queue with empty clusterID",
|
||||
clusterID: 0,
|
||||
hostname: "foo",
|
||||
expect: func(t *testing.T, result Queue, err error) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualError(err, "empty cluster id config is not specified")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
queue, err := GetCDNQueue(tc.clusterID, tc.hostname)
|
||||
tc.expect(t, queue, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,9 +30,6 @@ const (
|
|||
// Seed Peer prefix of cache key.
|
||||
SeedPeerNamespace = "seed-peer"
|
||||
|
||||
// CDN prefix of cache key.
|
||||
CDNNamespace = "cdn"
|
||||
|
||||
// Scheduler prefix of cache key.
|
||||
SchedulerNamespace = "scheduler"
|
||||
|
||||
|
|
@ -79,11 +76,6 @@ func MakeSeedPeerCacheKey(hostname string, clusterID uint) string {
|
|||
return MakeCacheKey(SeedPeerNamespace, fmt.Sprintf("%s-%d", hostname, clusterID))
|
||||
}
|
||||
|
||||
// Deprecated: Use MakeSeedPeerCacheKey instead.
|
||||
func MakeCDNCacheKey(hostname string, clusterID uint) string {
|
||||
return MakeCacheKey(CDNNamespace, fmt.Sprintf("%s-%d", hostname, clusterID))
|
||||
}
|
||||
|
||||
// Make cache key for scheduler
|
||||
func MakeSchedulerCacheKey(hostname string, clusterID uint) string {
|
||||
return MakeCacheKey(SchedulerNamespace, fmt.Sprintf("%s-%d", hostname, clusterID))
|
||||
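A quick sketch of the two surviving key helpers (the package name here is assumed; the id suffix is "<hostname>-<clusterID>" as defined above, while the namespace/id join is whatever MakeCacheKey produces, which this diff does not show):

package cache

// exampleCacheKeys builds cache keys for a seed peer and a scheduler that are
// both registered in cluster 1.
func exampleCacheKeys() (string, string) {
	seedPeerKey := MakeSeedPeerCacheKey("seed-peer-0", 1)   // namespace "seed-peer", id "seed-peer-0-1"
	schedulerKey := MakeSchedulerCacheKey("scheduler-0", 1) // namespace "scheduler", id "scheduler-0-1"
	return seedPeerKey, schedulerKey
}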
@ -1 +1 @@
|
|||
Subproject commit 9507a025f24729443c7339906b624a75cdafc143
|
||||
Subproject commit bb9e98344779d1ca6f5ae989c48f9a25cb5c10e8
|
||||
|
|
@ -98,8 +98,8 @@ func formatDSN(cfg *config.MysqlConfig) (string, error) {
|
|||
func migrate(db *gorm.DB) error {
|
||||
return db.Set("gorm:table_options", "DEFAULT CHARSET=utf8mb4 ROW_FORMAT=Dynamic").AutoMigrate(
|
||||
&model.Job{},
|
||||
&model.CDNCluster{},
|
||||
&model.CDN{},
|
||||
&model.SeedPeerCluster{},
|
||||
&model.SeedPeer{},
|
||||
&model.SchedulerCluster{},
|
||||
&model.Scheduler{},
|
||||
&model.SecurityRule{},
|
||||
|
|
@ -112,25 +112,6 @@ func migrate(db *gorm.DB) error {
|
|||
}
|
||||
|
||||
func seed(db *gorm.DB) error {
|
||||
var cdnClusterCount int64
|
||||
if err := db.Model(model.CDNCluster{}).Count(&cdnClusterCount).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
if cdnClusterCount <= 0 {
|
||||
if err := db.Create(&model.CDNCluster{
|
||||
Model: model.Model{
|
||||
ID: uint(1),
|
||||
},
|
||||
Name: "cdn-cluster-1",
|
||||
Config: map[string]interface{}{
|
||||
"load_limit": schedulerconfig.DefaultCDNLoadLimit,
|
||||
},
|
||||
IsDefault: true,
|
||||
}).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var schedulerClusterCount int64
|
||||
if err := db.Model(model.SchedulerCluster{}).Count(&schedulerClusterCount).Error; err != nil {
|
||||
return err
|
||||
|
|
@ -155,9 +136,26 @@ func seed(db *gorm.DB) error {
|
|||
}
|
||||
}
|
||||
|
||||
if schedulerClusterCount == 0 && cdnClusterCount == 0 {
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := db.First(&cdnCluster).Error; err != nil {
|
||||
var seedPeerClusterCount int64
|
||||
if err := db.Model(model.SeedPeerCluster{}).Count(&seedPeerClusterCount).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
if seedPeerClusterCount <= 0 {
|
||||
if err := db.Create(&model.SeedPeerCluster{
|
||||
Model: model.Model{
|
||||
ID: uint(1),
|
||||
},
|
||||
Name: "seed-peer-cluster-1",
|
||||
Config: map[string]interface{}{
|
||||
"load_limit": schedulerconfig.DefaultSeedPeerLoadLimit,
|
||||
},
|
||||
IsDefault: true,
|
||||
}).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
seedPeerCluster := model.SeedPeerCluster{}
|
||||
if err := db.First(&seedPeerCluster).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -166,7 +164,7 @@ func seed(db *gorm.DB) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if err := db.Model(&cdnCluster).Association("SchedulerClusters").Append(&schedulerCluster); err != nil {
|
||||
if err := db.Model(&seedPeerCluster).Association("SchedulerClusters").Append(&schedulerCluster); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -277,57 +277,3 @@ func (h *Handlers) DeleteSeedPeerClusterToApplication(ctx *gin.Context) {
|
|||
|
||||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
||||
// @Summary Add CDN to Application
|
||||
// @Description Add CDN to Application
|
||||
// @Tags Application
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Param cdn_cluster_id path string true "cdn cluster id"
|
||||
// @Success 200
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /applications/{id}/cdn-clusters/{cdn_cluster_id} [put]
|
||||
func (h *Handlers) AddCDNClusterToApplication(ctx *gin.Context) {
|
||||
var params types.AddCDNClusterToApplicationParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.AddCDNClusterToApplication(ctx.Request.Context(), params.ID, params.CDNClusterID); err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
||||
// @Summary Delete CDN to Application
|
||||
// @Description Delete CDN to Application
|
||||
// @Tags Application
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Param cdn_cluster_id path string true "cdn cluster id"
|
||||
// @Success 200
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /applications/{id}/cdn-clusters/{cdn_cluster_id} [delete]
|
||||
func (h *Handlers) DeleteCDNClusterToApplication(ctx *gin.Context) {
|
||||
var params types.DeleteCDNClusterToApplicationParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.DeleteCDNClusterToApplication(ctx.Request.Context(), params.ID, params.CDNClusterID); err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,171 +0,0 @@
|
|||
/*
|
||||
* Copyright 2020 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
// nolint
|
||||
_ "d7y.io/dragonfly/v2/manager/model"
|
||||
"d7y.io/dragonfly/v2/manager/types"
|
||||
)
|
||||
|
||||
// @Summary Create CDN
|
||||
// @Description create by json config
|
||||
// @Tags CDN
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param CDN body types.CreateCDNRequest true "CDN"
|
||||
// @Success 200 {object} model.CDN
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdns [post]
|
||||
func (h *Handlers) CreateCDN(ctx *gin.Context) {
|
||||
var json types.CreateCDNRequest
|
||||
if err := ctx.ShouldBindJSON(&json); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
cdn, err := h.service.CreateCDN(ctx.Request.Context(), json)
|
||||
if err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.JSON(http.StatusOK, cdn)
|
||||
}
|
||||
|
||||
// @Summary Destroy CDN
|
||||
// @Description Destroy by id
|
||||
// @Tags CDN
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Success 200
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdns/{id} [delete]
|
||||
func (h *Handlers) DestroyCDN(ctx *gin.Context) {
|
||||
var params types.CDNParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.DestroyCDN(ctx.Request.Context(), params.ID); err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
||||
// @Summary Update CDN
|
||||
// @Description Update by json config
|
||||
// @Tags CDN
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Param CDN body types.UpdateCDNRequest true "CDN"
|
||||
// @Success 200 {object} model.CDN
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdns/{id} [patch]
|
||||
func (h *Handlers) UpdateCDN(ctx *gin.Context) {
|
||||
var params types.CDNParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
var json types.UpdateCDNRequest
|
||||
if err := ctx.ShouldBindJSON(&json); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
cdn, err := h.service.UpdateCDN(ctx.Request.Context(), params.ID, json)
|
||||
if err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.JSON(http.StatusOK, cdn)
|
||||
}
|
||||
|
||||
// @Summary Get CDN
|
||||
// @Description Get CDN by id
|
||||
// @Tags CDN
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Success 200 {object} model.CDN
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdns/{id} [get]
|
||||
func (h *Handlers) GetCDN(ctx *gin.Context) {
|
||||
var params types.CDNParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
cdn, err := h.service.GetCDN(ctx.Request.Context(), params.ID)
|
||||
if err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.JSON(http.StatusOK, cdn)
|
||||
}
|
||||
|
||||
// @Summary Get CDNs
|
||||
// @Description Get CDNs
|
||||
// @Tags CDN
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param page query int true "current page" default(0)
|
||||
// @Param per_page query int true "return max item count, default 10, max 50" default(10) minimum(2) maximum(50)
|
||||
// @Success 200 {object} []model.CDN
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdns [get]
|
||||
func (h *Handlers) GetCDNs(ctx *gin.Context) {
|
||||
var query types.GetCDNsQuery
|
||||
if err := ctx.ShouldBindQuery(&query); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
h.setPaginationDefault(&query.Page, &query.PerPage)
|
||||
cdns, count, err := h.service.GetCDNs(ctx.Request.Context(), query)
|
||||
if err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
h.setPaginationLinkHeader(ctx, query.Page, query.PerPage, int(count))
|
||||
ctx.JSON(http.StatusOK, cdns)
|
||||
}
|
||||
|
|
@ -1,225 +0,0 @@
|
|||
/*
|
||||
* Copyright 2020 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
// nolint
|
||||
_ "d7y.io/dragonfly/v2/manager/model"
|
||||
"d7y.io/dragonfly/v2/manager/types"
|
||||
)
|
||||
|
||||
// @Summary Create CDNCluster
|
||||
// @Description create by json config
|
||||
// @Tags CDNCluster
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param CDNCluster body types.CreateCDNClusterRequest true "DNCluster"
|
||||
// @Success 200 {object} model.CDNCluster
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdn-clusters [post]
|
||||
func (h *Handlers) CreateCDNCluster(ctx *gin.Context) {
|
||||
var json types.CreateCDNClusterRequest
|
||||
if err := ctx.ShouldBindJSON(&json); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
cdnCluster, err := h.service.CreateCDNCluster(ctx.Request.Context(), json)
|
||||
if err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.JSON(http.StatusOK, cdnCluster)
|
||||
}
|
||||
|
||||
// @Summary Destroy CDNCluster
|
||||
// @Description Destroy by id
|
||||
// @Tags CDNCluster
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Success 200
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdn-clusters/{id} [delete]
|
||||
func (h *Handlers) DestroyCDNCluster(ctx *gin.Context) {
|
||||
var params types.CDNClusterParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.DestroyCDNCluster(ctx.Request.Context(), params.ID); err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
||||
// @Summary Update CDNCluster
|
||||
// @Description Update by json config
|
||||
// @Tags CDNCluster
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Param CDNCluster body types.UpdateCDNClusterRequest true "CDNCluster"
|
||||
// @Success 200 {object} model.CDNCluster
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdn-clusters/{id} [patch]
|
||||
func (h *Handlers) UpdateCDNCluster(ctx *gin.Context) {
|
||||
var params types.CDNClusterParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
var json types.UpdateCDNClusterRequest
|
||||
if err := ctx.ShouldBindJSON(&json); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
cdnCluster, err := h.service.UpdateCDNCluster(ctx.Request.Context(), params.ID, json)
|
||||
if err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.JSON(http.StatusOK, cdnCluster)
|
||||
}
|
||||
|
||||
// @Summary Get CDNCluster
|
||||
// @Description Get CDNCluster by id
|
||||
// @Tags CDNCluster
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Success 200 {object} model.CDNCluster
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdn-clusters/{id} [get]
|
||||
func (h *Handlers) GetCDNCluster(ctx *gin.Context) {
|
||||
var params types.CDNClusterParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
cdnCluster, err := h.service.GetCDNCluster(ctx.Request.Context(), params.ID)
|
||||
if err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.JSON(http.StatusOK, cdnCluster)
|
||||
}
|
||||
|
||||
// @Summary Get CDNClusters
|
||||
// @Description Get CDNClusters
|
||||
// @Tags CDNCluster
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param page query int true "current page" default(0)
|
||||
// @Param per_page query int true "return max item count, default 10, max 50" default(10) minimum(2) maximum(50)
|
||||
// @Success 200 {object} []model.CDNCluster
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdn-clusters [get]
|
||||
func (h *Handlers) GetCDNClusters(ctx *gin.Context) {
|
||||
var query types.GetCDNClustersQuery
|
||||
if err := ctx.ShouldBindQuery(&query); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
h.setPaginationDefault(&query.Page, &query.PerPage)
|
||||
cdns, count, err := h.service.GetCDNClusters(ctx.Request.Context(), query)
|
||||
if err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
h.setPaginationLinkHeader(ctx, query.Page, query.PerPage, int(count))
|
||||
ctx.JSON(http.StatusOK, cdns)
|
||||
}
|
||||
|
||||
// @Summary Add Instance to CDNCluster
|
||||
// @Description Add CDN to CDNCluster
|
||||
// @Tags CDNCluster
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Param cdn_id path string true "cdn id"
|
||||
// @Success 200
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdn-clusters/{id}/cdns/{cdn_id} [put]
|
||||
func (h *Handlers) AddCDNToCDNCluster(ctx *gin.Context) {
|
||||
var params types.AddCDNToCDNClusterParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.AddCDNToCDNCluster(ctx.Request.Context(), params.ID, params.CDNID); err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
||||
// @Summary Add SchedulerCluster to CDNCluster
|
||||
// @Description Add SchedulerCluster to CDNCluster
|
||||
// @Tags CDNCluster
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Param scheduler_cluster_id path string true "scheduler cluster id"
|
||||
// @Success 200
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /cdn-clusters/{id}/scheduler-clusters/{scheduler_cluster_id} [put]
|
||||
func (h *Handlers) AddSchedulerClusterToCDNCluster(ctx *gin.Context) {
|
||||
var params types.AddSchedulerClusterToCDNClusterParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.AddSchedulerClusterToCDNCluster(ctx.Request.Context(), params.ID, params.SchedulerClusterID); err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
|
@ -226,34 +226,6 @@ func (h *Handlers) AddSeedPeerClusterToSecurityGroup(ctx *gin.Context) {
|
|||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
||||
// @Summary Add CDN to SecurityGroup
|
||||
// @Description Add CDN to SecurityGroup
|
||||
// @Tags SecurityGroup
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "id"
|
||||
// @Param cdn_cluster_id path string true "cdn cluster id"
|
||||
// @Success 200
|
||||
// @Failure 400
|
||||
// @Failure 404
|
||||
// @Failure 500
|
||||
// @Router /security-groups/{id}/cdn-clusters/{cdn_cluster_id} [put]
|
||||
func (h *Handlers) AddCDNClusterToSecurityGroup(ctx *gin.Context) {
|
||||
var params types.AddCDNClusterToSecurityGroupParams
|
||||
if err := ctx.ShouldBindUri(¶ms); err != nil {
|
||||
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"errors": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
err := h.service.AddCDNClusterToSecurityGroup(ctx.Request.Context(), params.ID, params.CDNClusterID)
|
||||
if err != nil {
|
||||
ctx.Error(err) // nolint: errcheck
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
||||
// @Summary Add SecurityRule to SecurityGroup
|
||||
// @Description Add SecurityRule to SecurityGroup
|
||||
// @Tags SecurityGroup
|
||||
|
|
|
|||
|
|
@ -26,6 +26,5 @@ type Application struct {
|
|||
UserID uint `gorm:"comment:user id" json:"user_id"`
|
||||
User User `json:"user"`
|
||||
SeedPeerClusters []SeedPeerCluster `json:"seed_peer_clusters"`
|
||||
CDNClusters []CDNCluster `json:"cdn_clusters"`
|
||||
SchedulerClusters []SchedulerCluster `json:"scheduler_clusters"`
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,35 +0,0 @@
|
|||
/*
|
||||
* Copyright 2020 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package model
|
||||
|
||||
const (
|
||||
CDNStateActive = "active"
|
||||
CDNStateInactive = "inactive"
|
||||
)
|
||||
|
||||
type CDN struct {
|
||||
Model
|
||||
HostName string `gorm:"column:host_name;type:varchar(256);index:uk_cdn,unique;not null;comment:hostname" json:"host_name"`
|
||||
IDC string `gorm:"column:idc;type:varchar(1024);comment:internet data center" json:"idc"`
|
||||
Location string `gorm:"column:location;type:varchar(1024);comment:location" json:"location"`
|
||||
IP string `gorm:"column:ip;type:varchar(256);not null;comment:ip address" json:"ip"`
|
||||
Port int32 `gorm:"column:port;not null;comment:grpc service listening port" json:"port"`
|
||||
DownloadPort int32 `gorm:"column:download_port;not null;comment:download service listening port" json:"download_port"`
|
||||
State string `gorm:"column:state;type:varchar(256);default:'inactive';comment:service state" json:"state"`
|
||||
CDNClusterID uint `gorm:"index:uk_cdn,unique;not null;comment:cdn cluster id"`
|
||||
CDNCluster CDNCluster `json:"-"`
|
||||
}
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
/*
|
||||
* Copyright 2020 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package model
|
||||
|
||||
type CDNCluster struct {
|
||||
Model
|
||||
Name string `gorm:"column:name;type:varchar(256);index:uk_cdn_cluster_name,unique;not null;comment:name" json:"name"`
|
||||
BIO string `gorm:"column:bio;type:varchar(1024);comment:biography" json:"bio"`
|
||||
Config JSONMap `gorm:"column:config;not null;comment:configuration" json:"config"`
|
||||
SchedulerClusters []SchedulerCluster `gorm:"many2many:cdn_cluster_scheduler_cluster;" json:"scheduler_clusters"`
|
||||
IsDefault bool `gorm:"column:is_default;not null;default:false;comment:default cdn cluster" json:"is_default"`
|
||||
CDNs []CDN `json:"-"`
|
||||
ApplicationID uint `gorm:"comment:application id" json:"application_id"`
|
||||
Application Application `json:"-"`
|
||||
SecurityGroupID uint `gorm:"comment:security group id" json:"security_group_id"`
|
||||
SecurityGroup SecurityGroup `json:"-"`
|
||||
Jobs []Job `gorm:"many2many:job_cdn_cluster;" json:"jobs"`
|
||||
}
|
||||
|
|
@ -27,6 +27,5 @@ type Job struct {
|
|||
UserID uint `gorm:"column:user_id;comment:user id" json:"user_id"`
|
||||
User User `json:"-"`
|
||||
SeedPeerClusters []SeedPeerCluster `gorm:"many2many:job_seed_peer_cluster;" json:"seed_peer_clusters"`
|
||||
CDNClusters []CDNCluster `gorm:"many2many:job_cdn_cluster;" json:"cdn_clusters"`
|
||||
SchedulerClusters []SchedulerCluster `gorm:"many2many:job_scheduler_cluster;" json:"scheduler_clusters"`
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,7 +25,6 @@ type SchedulerCluster struct {
|
|||
Scopes JSONMap `gorm:"column:scopes;comment:match scopes" json:"scopes"`
|
||||
IsDefault bool `gorm:"column:is_default;not null;default:false;comment:default scheduler cluster" json:"is_default"`
|
||||
SeedPeerClusters []SeedPeerCluster `gorm:"many2many:seed_peer_cluster_scheduler_cluster;" json:"seed_peer_clusters"`
|
||||
CDNClusters []CDNCluster `gorm:"many2many:cdn_cluster_scheduler_cluster;" json:"cdn_clusters"`
|
||||
Schedulers []Scheduler `json:"-"`
|
||||
ApplicationID uint `gorm:"comment:application id" json:"application_id"`
|
||||
Application Application `json:"-"`
|
||||
|
|
|
|||
|
|
@ -22,6 +22,5 @@ type SecurityGroup struct {
|
|||
BIO string `gorm:"column:bio;type:varchar(1024);comment:biography" json:"bio"`
|
||||
SecurityRules []SecurityRule `gorm:"many2many:security_group_security_rule;" json:"security_rules"`
|
||||
SeedPeerClusters []SeedPeerCluster `json:"-"`
|
||||
CDNClusters []CDNCluster `json:"-"`
|
||||
SchedulerClusters []SchedulerCluster `json:"-"`
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,15 +22,16 @@ const (
|
|||
)
|
||||
|
||||
const (
|
||||
SeedPeerTypeSuperSeed = "SuperSeed"
|
||||
SeedPeerTypeStrongSeed = "StrongSeed"
|
||||
SeedPeerTypeWeakSeed = "WeakSeed"
|
||||
SeedPeerTypeSuperSeed = "super"
|
||||
SeedPeerTypeStrongSeed = "strong"
|
||||
SeedPeerTypeWeakSeed = "weak"
|
||||
)
|
||||
|
||||
type SeedPeer struct {
|
||||
Model
|
||||
HostName string `gorm:"column:host_name;type:varchar(256);index:uk_seed_peer,unique;not null;comment:hostname" json:"host_name"`
|
||||
Type string `gorm:"column:type;type:varchar(256);comment:type" json:"type"`
|
||||
IsCDN bool `gorm:"column:is_cdn;not null;default:false;comment:cdn seed peer" json:"is_cdn"`
|
||||
IDC string `gorm:"column:idc;type:varchar(1024);comment:internet data center" json:"idc"`
|
||||
NetTopology string `gorm:"column:net_topology;type:varchar(1024);comment:network topology" json:"net_topology"`
|
||||
Location string `gorm:"column:location;type:varchar(1024);comment:location" json:"location"`
|
||||
|
|
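Because the seed peer type strings change here from "SuperSeed"/"StrongSeed"/"WeakSeed" to "super"/"strong"/"weak" (matching the dfdaemon seedPeer.type value shown earlier), any caller that validates the configured type needs the new constants. A hedged sketch using the exported constants from the manager model package:

package example

import "d7y.io/dragonfly/v2/manager/model"

// isValidSeedPeerType reports whether a configured seedPeer.type value
// matches one of the renamed constants.
func isValidSeedPeerType(t string) bool {
	switch t {
	case model.SeedPeerTypeSuperSeed, model.SeedPeerTypeStrongSeed, model.SeedPeerTypeWeakSeed:
		return true
	default:
		return false
	}
}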
@ -156,8 +156,6 @@ func Init(cfg *config.Config, logDir string, service service.Service, enforcer *
|
|||
cs.DELETE(":id/scheduler-clusters/:scheduler_cluster_id", h.DeleteSchedulerClusterToApplication)
|
||||
cs.PUT(":id/seed-peer-clusters/:seed_peer_cluster_id", h.AddSeedPeerClusterToApplication)
|
||||
cs.DELETE(":id/seed-peer-clusters/:seed_peer_cluster_id", h.DeleteSeedPeerClusterToApplication)
|
||||
cs.PUT(":id/cdn-clusters/:cdn_cluster_id", h.AddCDNClusterToApplication)
|
||||
cs.DELETE(":id/cdn-clusters/:cdn_cluster_id", h.DeleteCDNClusterToApplication)
|
||||
|
||||
// Seed Peer Cluster
|
||||
spc := apiv1.Group("/seed-peer-clusters", jwt.MiddlewareFunc(), rbac)
|
||||
|
|
@ -177,24 +175,6 @@ func Init(cfg *config.Config, logDir string, service service.Service, enforcer *
|
|||
sp.GET(":id", h.GetSeedPeer)
|
||||
sp.GET("", h.GetSeedPeers)
|
||||
|
||||
// CDN Cluster
|
||||
cc := apiv1.Group("/cdn-clusters", jwt.MiddlewareFunc(), rbac)
|
||||
cc.POST("", h.CreateCDNCluster)
|
||||
cc.DELETE(":id", h.DestroyCDNCluster)
|
||||
cc.PATCH(":id", h.UpdateCDNCluster)
|
||||
cc.GET(":id", h.GetCDNCluster)
|
||||
cc.GET("", h.GetCDNClusters)
|
||||
cc.PUT(":id/cdns/:cdn_id", h.AddCDNToCDNCluster)
|
||||
cc.PUT(":id/scheduler-clusters/:scheduler_cluster_id", h.AddSchedulerClusterToCDNCluster)
|
||||
|
||||
// CDN
|
||||
c := apiv1.Group("/cdns", jwt.MiddlewareFunc(), rbac)
|
||||
c.POST("", h.CreateCDN)
|
||||
c.DELETE(":id", h.DestroyCDN)
|
||||
c.PATCH(":id", h.UpdateCDN)
|
||||
c.GET(":id", h.GetCDN)
|
||||
c.GET("", h.GetCDNs)
|
||||
|
||||
// Security Rule
|
||||
sr := apiv1.Group("/security-rules", jwt.MiddlewareFunc(), rbac)
|
||||
sr.POST("", h.CreateSecurityRule)
|
||||
|
|
@ -212,7 +192,6 @@ func Init(cfg *config.Config, logDir string, service service.Service, enforcer *
|
|||
sg.GET("", h.GetSecurityGroups)
|
||||
sg.PUT(":id/scheduler-clusters/:scheduler_cluster_id", h.AddSchedulerClusterToSecurityGroup)
|
||||
sg.PUT(":id/seed-peer-clusters/:seed_peer_cluster_id", h.AddSeedPeerClusterToSecurityGroup)
|
||||
sg.PUT(":id/cdn-clusters/:cdn_cluster_id", h.AddCDNClusterToSecurityGroup)
|
||||
sg.PUT(":id/security-rules/:security_rule_id", h.AddSecurityRuleToSecurityGroup)
|
||||
sg.DELETE(":id/security-rules/:security_rule_id", h.DestroySecurityRuleToSecurityGroup)
|
||||
|
||||
|
|
|
|||
|
|
@ -147,6 +147,7 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques
|
|||
pbSeedPeer = manager.SeedPeer{
|
||||
Id: uint64(seedPeer.ID),
|
||||
Type: seedPeer.Type,
|
||||
IsCdn: seedPeer.IsCDN,
|
||||
HostName: seedPeer.HostName,
|
||||
Idc: seedPeer.IDC,
|
||||
NetTopology: seedPeer.NetTopology,
|
||||
|
|
@ -193,6 +194,7 @@ func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
|
|||
|
||||
if err := s.db.WithContext(ctx).Model(&seedPeer).Updates(model.SeedPeer{
|
||||
Type: req.Type,
|
||||
IsCDN: req.IsCdn,
|
||||
IDC: req.Idc,
|
||||
NetTopology: req.NetTopology,
|
||||
Location: req.Location,
|
||||
|
|
@ -215,6 +217,7 @@ func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
|
|||
Id: uint64(seedPeer.ID),
|
||||
HostName: seedPeer.HostName,
|
||||
Type: seedPeer.Type,
|
||||
IsCdn: seedPeer.IsCDN,
|
||||
Idc: seedPeer.IDC,
|
||||
NetTopology: seedPeer.NetTopology,
|
||||
Location: seedPeer.Location,
|
||||
|
|
@ -231,6 +234,7 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
|
|||
seedPeer := model.SeedPeer{
|
||||
HostName: req.HostName,
|
||||
Type: req.Type,
|
||||
IsCDN: req.IsCdn,
|
||||
IDC: req.Idc,
|
||||
NetTopology: req.NetTopology,
|
||||
Location: req.Location,
|
||||
|
|
@ -248,6 +252,7 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
|
|||
Id: uint64(seedPeer.ID),
|
||||
HostName: seedPeer.HostName,
|
||||
Type: seedPeer.Type,
|
||||
IsCdn: seedPeer.IsCDN,
|
||||
Idc: seedPeer.IDC,
|
||||
NetTopology: seedPeer.NetTopology,
|
||||
Location: seedPeer.Location,
|
||||
|
|
@ -259,157 +264,6 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
|
|||
}, nil
|
||||
}
|
||||
|
||||
// Deprecated: Use GetSeedPeer instead.
|
||||
func (s *Server) GetCDN(ctx context.Context, req *manager.GetCDNRequest) (*manager.CDN, error) {
|
||||
var pbCDN manager.CDN
|
||||
cacheKey := cache.MakeCDNCacheKey(req.HostName, uint(req.CdnClusterId))
|
||||
|
||||
// Cache hit.
|
||||
if err := s.cache.Get(ctx, cacheKey, &pbCDN); err == nil {
|
||||
logger.Infof("%s cache hit", cacheKey)
|
||||
return &pbCDN, nil
|
||||
}
|
||||
|
||||
// Cache miss.
|
||||
logger.Infof("%s cache miss", cacheKey)
|
||||
cdn := model.CDN{}
|
||||
if err := s.db.WithContext(ctx).Preload("CDNCluster").Preload("CDNCluster.SchedulerClusters.Schedulers", &model.Scheduler{
|
||||
State: model.SchedulerStateActive,
|
||||
}).First(&cdn, &model.CDN{
|
||||
HostName: req.HostName,
|
||||
CDNClusterID: uint(req.CdnClusterId),
|
||||
}).Error; err != nil {
|
||||
return nil, status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
// Marshal config of cdn cluster.
|
||||
config, err := cdn.CDNCluster.Config.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.DataLoss, err.Error())
|
||||
}
|
||||
|
||||
// Construct schedulers.
|
||||
var pbSchedulers []*manager.Scheduler
|
||||
for _, schedulerCluster := range cdn.CDNCluster.SchedulerClusters {
|
||||
for _, scheduler := range schedulerCluster.Schedulers {
|
||||
pbSchedulers = append(pbSchedulers, &manager.Scheduler{
|
||||
Id: uint64(scheduler.ID),
|
||||
HostName: scheduler.HostName,
|
||||
Idc: scheduler.IDC,
|
||||
Location: scheduler.Location,
|
||||
Ip: scheduler.IP,
|
||||
Port: scheduler.Port,
|
||||
State: scheduler.State,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Construct cdn.
|
||||
pbCDN = manager.CDN{
|
||||
Id: uint64(cdn.ID),
|
||||
HostName: cdn.HostName,
|
||||
Idc: cdn.IDC,
|
||||
Location: cdn.Location,
|
||||
Ip: cdn.IP,
|
||||
Port: cdn.Port,
|
||||
DownloadPort: cdn.DownloadPort,
|
||||
State: cdn.State,
|
||||
CdnClusterId: uint64(cdn.CDNClusterID),
|
||||
CdnCluster: &manager.CDNCluster{
|
||||
Id: uint64(cdn.CDNCluster.ID),
|
||||
Name: cdn.CDNCluster.Name,
|
||||
Bio: cdn.CDNCluster.BIO,
|
||||
Config: config,
|
||||
},
|
||||
Schedulers: pbSchedulers,
|
||||
}
|
||||
|
||||
// Cache data.
|
||||
if err := s.cache.Once(&cachev8.Item{
|
||||
Ctx: ctx,
|
||||
Key: cacheKey,
|
||||
Value: &pbCDN,
|
||||
TTL: s.cache.TTL,
|
||||
}); err != nil {
|
||||
logger.Warnf("storage cache failed: %v", err)
|
||||
}
|
||||
|
||||
return &pbCDN, nil
|
||||
}
|
||||
|
||||
// Deprecated: Use UpdateSeedPeer instead.
|
||||
func (s *Server) UpdateCDN(ctx context.Context, req *manager.UpdateCDNRequest) (*manager.CDN, error) {
|
||||
cdn := model.CDN{}
|
||||
if err := s.db.WithContext(ctx).First(&cdn, model.CDN{
|
||||
HostName: req.HostName,
|
||||
CDNClusterID: uint(req.CdnClusterId),
|
||||
}).Error; err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return s.createCDN(ctx, req)
|
||||
}
|
||||
return nil, status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Model(&cdn).Updates(model.CDN{
|
||||
IDC: req.Idc,
|
||||
Location: req.Location,
|
||||
IP: req.Ip,
|
||||
Port: req.Port,
|
||||
DownloadPort: req.DownloadPort,
|
||||
CDNClusterID: uint(req.CdnClusterId),
|
||||
}).Error; err != nil {
|
||||
return nil, status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
if err := s.cache.Delete(
|
||||
ctx,
|
||||
cache.MakeCDNCacheKey(cdn.HostName, cdn.CDNClusterID),
|
||||
); err != nil {
|
||||
logger.Warnf("%s refresh keepalive status failed in cdn cluster %d", cdn.HostName, cdn.CDNClusterID)
|
||||
}
|
||||
|
||||
return &manager.CDN{
|
||||
Id: uint64(cdn.ID),
|
||||
HostName: cdn.HostName,
|
||||
Idc: cdn.IDC,
|
||||
Location: cdn.Location,
|
||||
Ip: cdn.IP,
|
||||
Port: cdn.Port,
|
||||
DownloadPort: cdn.DownloadPort,
|
||||
CdnClusterId: uint64(cdn.CDNClusterID),
|
||||
State: cdn.State,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Deprecated: Use createSeedPeer instead.
|
||||
func (s *Server) createCDN(ctx context.Context, req *manager.UpdateCDNRequest) (*manager.CDN, error) {
|
||||
cdn := model.CDN{
|
||||
HostName: req.HostName,
|
||||
IDC: req.Idc,
|
||||
Location: req.Location,
|
||||
IP: req.Ip,
|
||||
Port: req.Port,
|
||||
DownloadPort: req.DownloadPort,
|
||||
CDNClusterID: uint(req.CdnClusterId),
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Create(&cdn).Error; err != nil {
|
||||
return nil, status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
return &manager.CDN{
|
||||
Id: uint64(cdn.ID),
|
||||
HostName: cdn.HostName,
|
||||
Idc: cdn.IDC,
|
||||
Location: cdn.Location,
|
||||
Ip: cdn.IP,
|
||||
Port: cdn.Port,
|
||||
DownloadPort: cdn.DownloadPort,
|
||||
CdnClusterId: uint64(cdn.CDNClusterID),
|
||||
State: cdn.State,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Get Scheduler and Scheduler cluster configuration.
|
||||
func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
|
||||
var pbScheduler manager.Scheduler
|
||||
|
|
@ -424,9 +278,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
|
|||
// Cache miss.
|
||||
logger.Infof("%s cache miss", cacheKey)
|
||||
scheduler := model.Scheduler{}
|
||||
if err := s.db.WithContext(ctx).Preload("SchedulerCluster").Preload("SchedulerCluster.CDNClusters.CDNs", &model.CDN{
|
||||
State: model.CDNStateActive,
|
||||
}).Preload("SchedulerCluster.SeedPeerClusters.SeedPeers", &model.CDN{
|
||||
if err := s.db.WithContext(ctx).Preload("SchedulerCluster").Preload("SchedulerCluster.SeedPeerClusters.SeedPeers", &model.SeedPeer{
|
||||
State: model.SeedPeerStateActive,
|
||||
}).First(&scheduler, &model.Scheduler{
|
||||
HostName: req.HostName,
|
||||
|
|
@ -460,6 +312,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
|
|||
Id: uint64(seedPeer.ID),
|
||||
HostName: seedPeer.HostName,
|
||||
Type: seedPeer.Type,
|
||||
IsCdn: seedPeer.IsCDN,
|
||||
Idc: seedPeer.IDC,
|
||||
NetTopology: seedPeer.NetTopology,
|
||||
Location: seedPeer.Location,
|
||||
|
|
@ -478,35 +331,6 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
|
|||
}
|
||||
}
|
||||
|
||||
// Deprecated: Use pbSeedPeers instead.
|
||||
var pbCDNs []*manager.CDN
|
||||
for _, cdnCluster := range scheduler.SchedulerCluster.CDNClusters {
|
||||
cdnClusterConfig, err := cdnCluster.Config.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.DataLoss, err.Error())
|
||||
}
|
||||
|
||||
for _, cdn := range cdnCluster.CDNs {
|
||||
pbCDNs = append(pbCDNs, &manager.CDN{
|
||||
Id: uint64(cdn.ID),
|
||||
HostName: cdn.HostName,
|
||||
Idc: cdn.IDC,
|
||||
Location: cdn.Location,
|
||||
Ip: cdn.IP,
|
||||
Port: cdn.Port,
|
||||
DownloadPort: cdn.DownloadPort,
|
||||
State: cdn.State,
|
||||
CdnClusterId: uint64(cdn.CDNClusterID),
|
||||
CdnCluster: &manager.CDNCluster{
|
||||
Id: uint64(cdnCluster.ID),
|
||||
Name: cdnCluster.Name,
|
||||
Bio: cdnCluster.BIO,
|
||||
Config: cdnClusterConfig,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Construct scheduler.
|
||||
pbScheduler = manager.Scheduler{
|
||||
Id: uint64(scheduler.ID),
|
||||
|
|
@ -526,7 +350,6 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
|
|||
ClientConfig: schedulerClusterClientConfig,
|
||||
},
|
||||
SeedPeers: pbSeedPeers,
|
||||
Cdns: pbCDNs,
|
||||
}
|
||||
|
||||
// Cache data.
|
||||
|
|
@ -639,7 +462,7 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers
|
|||
log.Infof("list scheduler clusters %v with hostInfo %#v", getSchedulerClusterNames(schedulerClusters), req.HostInfo)
|
||||
schedulerClusters, err := s.searcher.FindSchedulerClusters(ctx, schedulerClusters, req)
|
||||
if err != nil {
|
||||
log.Errorf("can not matching scheduler cluster %v", err)
|
||||
log.Error(err)
|
||||
return nil, status.Error(codes.NotFound, "scheduler cluster not found")
|
||||
}
|
||||
log.Infof("find matching scheduler cluster %v", getSchedulerClusterNames(schedulerClusters))
|
||||
|
|
@ -731,26 +554,6 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Deprecated: Use SourceType_SEED_PEER_SOURCE instead.
|
||||
if sourceType == manager.SourceType_CDN_SOURCE {
|
||||
cdn := model.CDN{}
|
||||
if err := s.db.First(&cdn, model.CDN{
|
||||
HostName: hostName,
|
||||
CDNClusterID: clusterID,
|
||||
}).Updates(model.CDN{
|
||||
State: model.CDNStateActive,
|
||||
}).Error; err != nil {
|
||||
return status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
if err := s.cache.Delete(
|
||||
context.TODO(),
|
||||
cache.MakeCDNCacheKey(hostName, clusterID),
|
||||
); err != nil {
|
||||
logger.Warnf("%s refresh keepalive status failed in cdn cluster %d", hostName, clusterID)
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
_, err := stream.Recv()
|
||||
if err != nil {
|
||||
|
|
@ -794,26 +597,6 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Deprecated: Use SourceType_SEED_PEER_SOURCE instead.
|
||||
if sourceType == manager.SourceType_CDN_SOURCE {
|
||||
cdn := model.CDN{}
|
||||
if err := s.db.First(&cdn, model.CDN{
|
||||
HostName: hostName,
|
||||
CDNClusterID: clusterID,
|
||||
}).Updates(model.CDN{
|
||||
State: model.CDNStateInactive,
|
||||
}).Error; err != nil {
|
||||
return status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
if err := s.cache.Delete(
|
||||
context.TODO(),
|
||||
cache.MakeCDNCacheKey(hostName, clusterID),
|
||||
); err != nil {
|
||||
logger.Warnf("%s refresh keepalive status failed in cdn cluster %d", hostName, clusterID)
|
||||
}
|
||||
}
|
||||
|
||||
if err == io.EOF {
|
||||
logger.Infof("%s close keepalive in cluster %d", hostName, clusterID)
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -110,7 +110,7 @@ func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters
|
|||
|
||||
clusters := FilterSchedulerClusters(conditions, schedulerClusters)
|
||||
if len(clusters) == 0 {
|
||||
return nil, fmt.Errorf("security domain %s does not match any scheduler cluster", conditions[ConditionSecurityDomain])
|
||||
return nil, fmt.Errorf("conditions %#v does not match any scheduler cluster", conditions)
|
||||
}
|
||||
|
||||
sort.Slice(
|
||||
|
|
|
|||
|
|
@ -78,7 +78,7 @@ func TestSchedulerCluster(t *testing.T) {
|
|||
conditions: map[string]string{"security_domain": "domain-1"},
|
||||
expect: func(t *testing.T, data []model.SchedulerCluster, err error) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualError(err, "security domain domain-1 does not match any scheduler cluster")
|
||||
assert.Error(err)
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ func (s *service) CreateApplication(ctx context.Context, json types.CreateApplic
|
|||
State: json.State,
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").Preload("User").Create(&application).Error; err != nil {
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("SchedulerClusters").Preload("User").Create(&application).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
@ -55,7 +55,7 @@ func (s *service) DestroyApplication(ctx context.Context, id uint) error {
|
|||
|
||||
func (s *service) UpdateApplication(ctx context.Context, id uint, json types.UpdateApplicationRequest) (*model.Application, error) {
|
||||
application := model.Application{}
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").Preload("User").First(&application, id).Updates(model.Application{
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("SchedulerClusters").Preload("User").First(&application, id).Updates(model.Application{
|
||||
Name: json.Name,
|
||||
DownloadRateLimit: json.DownloadRateLimit,
|
||||
URL: json.URL,
|
||||
|
|
@ -71,7 +71,7 @@ func (s *service) UpdateApplication(ctx context.Context, id uint, json types.Upd
|
|||
|
||||
func (s *service) GetApplication(ctx context.Context, id uint) (*model.Application, error) {
|
||||
application := model.Application{}
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").Preload("User").First(&application, id).Error; err != nil {
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("SchedulerClusters").Preload("User").First(&application, id).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
@ -81,7 +81,7 @@ func (s *service) GetApplication(ctx context.Context, id uint) (*model.Applicati
|
|||
func (s *service) GetApplications(ctx context.Context, q types.GetApplicationsQuery) (*[]model.Application, int64, error) {
|
||||
var count int64
|
||||
applications := []model.Application{}
|
||||
if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").Preload("User").Find(&applications).Count(&count).Error; err != nil {
|
||||
if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Preload("SeedPeerClusters").Preload("SchedulerClusters").Preload("User").Find(&applications).Count(&count).Error; err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
|
|
@ -159,39 +159,3 @@ func (s *service) DeleteSeedPeerClusterToApplication(ctx context.Context, id, se
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) AddCDNClusterToApplication(ctx context.Context, id, cdnClusterID uint) error {
|
||||
application := model.Application{}
|
||||
if err := s.db.WithContext(ctx).First(&application, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).First(&cdnCluster, cdnClusterID).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Model(&application).Association("CDNClusters").Append(&cdnCluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) DeleteCDNClusterToApplication(ctx context.Context, id, cdnClusterID uint) error {
|
||||
application := model.Application{}
|
||||
if err := s.db.WithContext(ctx).First(&application, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).First(&cdnCluster, cdnClusterID).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.Model(&application).Association("CDNClusters").Delete(&cdnCluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,98 +0,0 @@
|
|||
/*
|
||||
* Copyright 2020 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"d7y.io/dragonfly/v2/manager/model"
|
||||
"d7y.io/dragonfly/v2/manager/types"
|
||||
)
|
||||
|
||||
func (s *service) CreateCDN(ctx context.Context, json types.CreateCDNRequest) (*model.CDN, error) {
|
||||
cdn := model.CDN{
|
||||
HostName: json.HostName,
|
||||
IDC: json.IDC,
|
||||
Location: json.Location,
|
||||
IP: json.IP,
|
||||
Port: json.Port,
|
||||
DownloadPort: json.DownloadPort,
|
||||
CDNClusterID: json.CDNClusterID,
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Create(&cdn).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cdn, nil
|
||||
}
|
||||
|
||||
func (s *service) DestroyCDN(ctx context.Context, id uint) error {
|
||||
cdn := model.CDN{}
|
||||
if err := s.db.WithContext(ctx).First(&cdn, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Unscoped().Delete(&model.CDN{}, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) UpdateCDN(ctx context.Context, id uint, json types.UpdateCDNRequest) (*model.CDN, error) {
|
||||
cdn := model.CDN{}
|
||||
if err := s.db.WithContext(ctx).First(&cdn, id).Updates(model.CDN{
|
||||
IDC: json.IDC,
|
||||
Location: json.Location,
|
||||
IP: json.IP,
|
||||
Port: json.Port,
|
||||
DownloadPort: json.DownloadPort,
|
||||
CDNClusterID: json.CDNClusterID,
|
||||
}).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cdn, nil
|
||||
}
|
||||
|
||||
func (s *service) GetCDN(ctx context.Context, id uint) (*model.CDN, error) {
|
||||
cdn := model.CDN{}
|
||||
if err := s.db.WithContext(ctx).First(&cdn, id).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cdn, nil
|
||||
}
|
||||
|
||||
func (s *service) GetCDNs(ctx context.Context, q types.GetCDNsQuery) (*[]model.CDN, int64, error) {
|
||||
var count int64
|
||||
var cdns []model.CDN
|
||||
if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Where(&model.CDN{
|
||||
HostName: q.HostName,
|
||||
IDC: q.IDC,
|
||||
Location: q.Location,
|
||||
IP: q.IP,
|
||||
Port: q.Port,
|
||||
DownloadPort: q.DownloadPort,
|
||||
CDNClusterID: q.CDNClusterID,
|
||||
}).Find(&cdns).Count(&count).Error; err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
return &cdns, count, nil
|
||||
}
|
||||
|
|
@ -1,148 +0,0 @@
|
|||
/*
|
||||
* Copyright 2020 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"d7y.io/dragonfly/v2/manager/model"
|
||||
"d7y.io/dragonfly/v2/manager/types"
|
||||
"d7y.io/dragonfly/v2/pkg/util/structutils"
|
||||
)
|
||||
|
||||
func (s *service) CreateCDNCluster(ctx context.Context, json types.CreateCDNClusterRequest) (*model.CDNCluster, error) {
|
||||
config, err := structutils.StructToMap(json.Config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cdnCluster := model.CDNCluster{
|
||||
Name: json.Name,
|
||||
BIO: json.BIO,
|
||||
Config: config,
|
||||
IsDefault: json.IsDefault,
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Create(&cdnCluster).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cdnCluster, nil
|
||||
}
|
||||
|
||||
func (s *service) DestroyCDNCluster(ctx context.Context, id uint) error {
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).Preload("CDNs").First(&cdnCluster, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(cdnCluster.CDNs) != 0 {
|
||||
return errors.New("cdn cluster exists cdn")
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Unscoped().Delete(&model.CDNCluster{}, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) UpdateCDNCluster(ctx context.Context, id uint, json types.UpdateCDNClusterRequest) (*model.CDNCluster, error) {
|
||||
config, err := structutils.StructToMap(json.Config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).First(&cdnCluster, id).Updates(model.CDNCluster{
|
||||
Name: json.Name,
|
||||
BIO: json.BIO,
|
||||
Config: config,
|
||||
IsDefault: json.IsDefault,
|
||||
}).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cdnCluster, nil
|
||||
}
|
||||
|
||||
func (s *service) GetCDNCluster(ctx context.Context, id uint) (*model.CDNCluster, error) {
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).First(&cdnCluster, id).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cdnCluster, nil
|
||||
}
|
||||
|
||||
func (s *service) GetCDNClusters(ctx context.Context, q types.GetCDNClustersQuery) (*[]model.CDNCluster, int64, error) {
|
||||
var count int64
|
||||
var cdnClusters []model.CDNCluster
|
||||
if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Where(&model.CDNCluster{
|
||||
Name: q.Name,
|
||||
}).Find(&cdnClusters).Count(&count).Error; err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
return &cdnClusters, count, nil
|
||||
}
|
||||
|
||||
func (s *service) AddCDNToCDNCluster(ctx context.Context, id, cdnID uint) error {
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).First(&cdnCluster, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cdn := model.CDN{}
|
||||
if err := s.db.WithContext(ctx).First(&cdn, cdnID).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Model(&cdnCluster).Association("CDNs").Append(&cdn); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) AddSchedulerClusterToCDNCluster(ctx context.Context, id, schedulerClusterID uint) error {
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).First(&cdnCluster, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
schedulerCluster := model.SchedulerCluster{}
|
||||
if err := s.db.WithContext(ctx).First(&schedulerCluster, schedulerClusterID).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cdnClusters := []model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).Model(&schedulerCluster).Association("CDNClusters").Find(&cdnClusters); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Model(&schedulerCluster).Association("CDNClusters").Delete(cdnClusters); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Model(&cdnCluster).Association("SchedulerClusters").Append(&schedulerCluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
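
The removed AddSchedulerClusterToCDNCluster above rebinds a scheduler cluster by finding its current association, deleting it, then appending the new cluster. A hedged sketch of the same rebind using GORM's Association Replace, which collapses those three steps; the models below are minimal stand-ins, not the manager's schema:

```go
// Sketch only: model fields and the join table name are illustrative assumptions.
package service

import "gorm.io/gorm"

type SchedulerCluster struct {
	ID               uint
	SeedPeerClusters []SeedPeerCluster `gorm:"many2many:seed_peer_cluster_scheduler_cluster;"`
}

type SeedPeerCluster struct {
	ID uint
}

// rebindSeedPeerCluster mirrors the Find+Delete+Append sequence above:
// Replace clears the current association and binds the new cluster.
func rebindSeedPeerCluster(db *gorm.DB, sc *SchedulerCluster, spc *SeedPeerCluster) error {
	return db.Model(sc).Association("SeedPeerClusters").Replace(spc)
}
```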
|
||||
|
|
@ -155,7 +155,7 @@ func (s *service) DestroyJob(ctx context.Context, id uint) error {
|
|||
|
||||
func (s *service) UpdateJob(ctx context.Context, id uint, json types.UpdateJobRequest) (*model.Job, error) {
|
||||
job := model.Job{}
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").Preload("SchedulerClusters").First(&job, id).Updates(model.Job{
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("SchedulerClusters").First(&job, id).Updates(model.Job{
|
||||
BIO: json.BIO,
|
||||
UserID: json.UserID,
|
||||
}).Error; err != nil {
|
||||
|
|
@ -231,25 +231,3 @@ func (s *service) AddJobToSeedPeerClusters(ctx context.Context, id, seedPeerClus
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) AddJobToCDNClusters(ctx context.Context, id, cdnClusterIDs []uint) error {
|
||||
job := model.Job{}
|
||||
if err := s.db.WithContext(ctx).First(&job, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var cdnClusters []*model.CDNCluster
|
||||
for _, cdnClusterID := range cdnClusterIDs {
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).First(&cdnCluster, cdnClusterID).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
cdnClusters = append(cdnClusters, &cdnCluster)
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Model(&job).Association("CDNClusters").Append(cdnClusters); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
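
The deleted AddJobToCDNClusters mirrors the surviving AddJobToSeedPeerClusters shape: resolve every cluster ID first, then append the whole batch to the job's association. A minimal sketch of that surviving shape, with stand-in models and a hypothetical function name:

```go
// Sketch only: Job/SeedPeerCluster are minimal stand-ins for the manager models.
package service

import (
	"context"

	"gorm.io/gorm"
)

type Job struct {
	ID               uint
	SeedPeerClusters []SeedPeerCluster `gorm:"many2many:job_seed_peer_cluster;"`
}

type SeedPeerCluster struct {
	ID uint
}

func addJobToSeedPeerClusters(ctx context.Context, db *gorm.DB, jobID uint, clusterIDs []uint) error {
	job := Job{}
	if err := db.WithContext(ctx).First(&job, jobID).Error; err != nil {
		return err
	}

	// Resolve every referenced cluster first so a missing ID fails the whole call.
	var clusters []*SeedPeerCluster
	for _, id := range clusterIDs {
		cluster := SeedPeerCluster{}
		if err := db.WithContext(ctx).First(&cluster, id).Error; err != nil {
			return err
		}
		clusters = append(clusters, &cluster)
	}

	// Append the batch to the job's many-to-many association.
	return db.WithContext(ctx).Model(&job).Association("SeedPeerClusters").Append(clusters)
}
```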
|
||||
|
|
|
|||
|
|
@ -60,12 +60,6 @@ func (s *service) CreateSchedulerCluster(ctx context.Context, json types.CreateS
|
|||
}
|
||||
}
|
||||
|
||||
if json.CDNClusterID > 0 {
|
||||
if err := s.AddSchedulerClusterToCDNCluster(ctx, json.CDNClusterID, schedulerCluster.ID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &schedulerCluster, nil
|
||||
}
|
||||
|
||||
|
|
@ -120,18 +114,12 @@ func (s *service) UpdateSchedulerCluster(ctx context.Context, id uint, json type
|
|||
}
|
||||
}
|
||||
|
||||
if json.CDNClusterID > 0 {
|
||||
if err := s.AddSchedulerClusterToCDNCluster(ctx, json.CDNClusterID, schedulerCluster.ID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &schedulerCluster, nil
|
||||
}
|
||||
|
||||
func (s *service) GetSchedulerCluster(ctx context.Context, id uint) (*model.SchedulerCluster, error) {
|
||||
schedulerCluster := model.SchedulerCluster{}
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").Preload("CDNClusters").First(&schedulerCluster, id).Error; err != nil {
|
||||
if err := s.db.WithContext(ctx).Preload("SeedPeerClusters").First(&schedulerCluster, id).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
@ -143,7 +131,7 @@ func (s *service) GetSchedulerClusters(ctx context.Context, q types.GetScheduler
|
|||
var schedulerClusters []model.SchedulerCluster
|
||||
if err := s.db.WithContext(ctx).Scopes(model.Paginate(q.Page, q.PerPage)).Where(&model.SchedulerCluster{
|
||||
Name: q.Name,
|
||||
}).Preload("SeedPeerClusters").Preload("CDNClusters").Find(&schedulerClusters).Count(&count).Error; err != nil {
|
||||
}).Preload("SeedPeerClusters").Find(&schedulerClusters).Count(&count).Error; err != nil {
|
||||
return nil, 0, err
|
||||
}
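
With the CDNClusters preload dropped, the scheduler cluster queries above eager-load only SeedPeerClusters. A small hedged sketch of what that Preload buys on its own, using stand-in models rather than the manager's real schema:

```go
// Sketch only: minimal models standing in for the manager schema.
package service

import "gorm.io/gorm"

type SchedulerCluster struct {
	ID               uint
	Name             string
	SeedPeerClusters []SeedPeerCluster `gorm:"many2many:seed_peer_cluster_scheduler_cluster;"`
}

type SeedPeerCluster struct {
	ID uint
}

// getSchedulerCluster loads the cluster and eagerly fills its seed peer
// clusters in one extra query, which is what Preload("SeedPeerClusters") does.
func getSchedulerCluster(db *gorm.DB, id uint) (*SchedulerCluster, error) {
	cluster := SchedulerCluster{}
	if err := db.Preload("SeedPeerClusters").First(&cluster, id).Error; err != nil {
		return nil, err
	}
	return &cluster, nil
}
```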
|
||||
|
||||
|
|
|
|||
|
|
@ -118,24 +118,6 @@ func (s *service) AddSeedPeerClusterToSecurityGroup(ctx context.Context, id, see
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *service) AddCDNClusterToSecurityGroup(ctx context.Context, id, cdnClusterID uint) error {
|
||||
securityGroup := model.SecurityGroup{}
|
||||
if err := s.db.WithContext(ctx).First(&securityGroup, id).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cdnCluster := model.CDNCluster{}
|
||||
if err := s.db.WithContext(ctx).First(&cdnCluster, cdnClusterID).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Model(&securityGroup).Association("CDNClusters").Append(&cdnCluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *service) AddSecurityRuleToSecurityGroup(ctx context.Context, id, securityRuleID uint) error {
|
||||
securityGroup := model.SecurityGroup{}
|
||||
if err := s.db.WithContext(ctx).First(&securityGroup, id).Error; err != nil {
|
||||
|
|
|
|||
|
|
@ -74,20 +74,6 @@ type Service interface {
|
|||
GetSeedPeer(context.Context, uint) (*model.SeedPeer, error)
|
||||
GetSeedPeers(context.Context, types.GetSeedPeersQuery) (*[]model.SeedPeer, int64, error)
|
||||
|
||||
CreateCDNCluster(context.Context, types.CreateCDNClusterRequest) (*model.CDNCluster, error)
|
||||
DestroyCDNCluster(context.Context, uint) error
|
||||
UpdateCDNCluster(context.Context, uint, types.UpdateCDNClusterRequest) (*model.CDNCluster, error)
|
||||
GetCDNCluster(context.Context, uint) (*model.CDNCluster, error)
|
||||
GetCDNClusters(context.Context, types.GetCDNClustersQuery) (*[]model.CDNCluster, int64, error)
|
||||
AddCDNToCDNCluster(context.Context, uint, uint) error
|
||||
AddSchedulerClusterToCDNCluster(context.Context, uint, uint) error
|
||||
|
||||
CreateCDN(context.Context, types.CreateCDNRequest) (*model.CDN, error)
|
||||
DestroyCDN(context.Context, uint) error
|
||||
UpdateCDN(context.Context, uint, types.UpdateCDNRequest) (*model.CDN, error)
|
||||
GetCDN(context.Context, uint) (*model.CDN, error)
|
||||
GetCDNs(context.Context, types.GetCDNsQuery) (*[]model.CDN, int64, error)
|
||||
|
||||
CreateSchedulerCluster(context.Context, types.CreateSchedulerClusterRequest) (*model.SchedulerCluster, error)
|
||||
DestroySchedulerCluster(context.Context, uint) error
|
||||
UpdateSchedulerCluster(context.Context, uint, types.UpdateSchedulerClusterRequest) (*model.SchedulerCluster, error)
|
||||
|
|
@ -114,7 +100,6 @@ type Service interface {
|
|||
GetSecurityGroups(context.Context, types.GetSecurityGroupsQuery) (*[]model.SecurityGroup, int64, error)
|
||||
AddSchedulerClusterToSecurityGroup(context.Context, uint, uint) error
|
||||
AddSeedPeerClusterToSecurityGroup(context.Context, uint, uint) error
|
||||
AddCDNClusterToSecurityGroup(context.Context, uint, uint) error
|
||||
AddSecurityRuleToSecurityGroup(context.Context, uint, uint) error
|
||||
DestroySecurityRuleToSecurityGroup(context.Context, uint, uint) error
|
||||
|
||||
|
|
@ -142,8 +127,6 @@ type Service interface {
|
|||
DeleteSchedulerClusterToApplication(context.Context, uint, uint) error
|
||||
AddSeedPeerClusterToApplication(context.Context, uint, uint) error
|
||||
DeleteSeedPeerClusterToApplication(context.Context, uint, uint) error
|
||||
AddCDNClusterToApplication(context.Context, uint, uint) error
|
||||
DeleteCDNClusterToApplication(context.Context, uint, uint) error
|
||||
}
|
||||
|
||||
type service struct {
|
||||
|
|
|
|||
|
|
@ -40,16 +40,6 @@ type DeleteSeedPeerClusterToApplicationParams struct {
|
|||
SeedPeerClusterID uint `uri:"seed_peer_cluster_id" binding:"required"`
|
||||
}
|
||||
|
||||
type AddCDNClusterToApplicationParams struct {
|
||||
ID uint `uri:"id" binding:"required"`
|
||||
CDNClusterID uint `uri:"cdn_cluster_id" binding:"required"`
|
||||
}
|
||||
|
||||
type DeleteCDNClusterToApplicationParams struct {
|
||||
ID uint `uri:"id" binding:"required"`
|
||||
CDNClusterID uint `uri:"cdn_cluster_id" binding:"required"`
|
||||
}
|
||||
|
||||
type CreateApplicationRequest struct {
|
||||
Name string `json:"name" binding:"required"`
|
||||
BIO string `json:"bio" binding:"omitempty"`
|
||||
|
|
|
|||
|
|
@ -1,53 +0,0 @@
|
|||
/*
|
||||
* Copyright 2020 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
type CDNParams struct {
|
||||
ID uint `uri:"id" binding:"required"`
|
||||
}
|
||||
|
||||
type CreateCDNRequest struct {
|
||||
HostName string `json:"host_name" binding:"required"`
|
||||
IDC string `json:"idc" binding:"omitempty"`
|
||||
Location string `json:"location" binding:"omitempty"`
|
||||
IP string `json:"ip" binding:"required"`
|
||||
Port int32 `json:"port" binding:"required"`
|
||||
DownloadPort int32 `json:"download_port" binding:"required"`
|
||||
CDNClusterID uint `json:"cdn_cluster_id" binding:"required"`
|
||||
}
|
||||
|
||||
type UpdateCDNRequest struct {
|
||||
IDC string `json:"idc" binding:"omitempty"`
|
||||
Location string `json:"location" binding:"omitempty"`
|
||||
IP string `json:"ip" binding:"omitempty"`
|
||||
Port int32 `json:"port" binding:"omitempty"`
|
||||
DownloadPort int32 `json:"download_port" binding:"omitempty"`
|
||||
CDNClusterID uint `json:"cdn_cluster_id" binding:"omitempty"`
|
||||
}
|
||||
|
||||
type GetCDNsQuery struct {
|
||||
HostName string `form:"host_name" binding:"omitempty"`
|
||||
IDC string `form:"idc" binding:"omitempty"`
|
||||
Location string `form:"location" binding:"omitempty"`
|
||||
IP string `form:"ip" binding:"omitempty"`
|
||||
Port int32 `form:"port" binding:"omitempty"`
|
||||
DownloadPort int32 `form:"download_port" binding:"omitempty"`
|
||||
CDNClusterID uint `form:"cdn_cluster_id" binding:"omitempty"`
|
||||
Page int `form:"page" binding:"omitempty,gte=1"`
|
||||
PerPage int `form:"per_page" binding:"omitempty,gte=1,lte=50"`
|
||||
State string `form:"state" binding:"omitempty,oneof=active inactive"`
|
||||
}
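
The removed CDN types above, and their surviving seed peer counterparts, are plain Gin binding structs: uri tags for path parameters, json for bodies, form for query strings, with go-playground/validator rules in the binding tag. A hedged sketch of a handler consuming such a query struct; the handler and route wiring are illustrative, not code from this change:

```go
// Sketch only: illustrative handler, not part of this commit.
package handlers

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

type GetSeedPeersQuery struct {
	HostName string `form:"host_name" binding:"omitempty"`
	Page     int    `form:"page" binding:"omitempty,gte=1"`
	PerPage  int    `form:"per_page" binding:"omitempty,gte=1,lte=50"`
}

func getSeedPeers(c *gin.Context) {
	var q GetSeedPeersQuery
	// ShouldBindQuery fills the struct from the query string and runs the
	// binding rules; gte/lte violations surface here as an error.
	if err := c.ShouldBindQuery(&q); err != nil {
		c.JSON(http.StatusUnprocessableEntity, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"host_name": q.HostName, "page": q.Page, "per_page": q.PerPage})
}
```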
|
||||
|
|
@ -1,56 +0,0 @@
|
|||
/*
|
||||
* Copyright 2020 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
type CDNClusterParams struct {
|
||||
ID uint `uri:"id" binding:"required"`
|
||||
}
|
||||
|
||||
type AddCDNToCDNClusterParams struct {
|
||||
ID uint `uri:"id" binding:"required"`
|
||||
CDNID uint `uri:"cdn_id" binding:"required"`
|
||||
}
|
||||
|
||||
type AddSchedulerClusterToCDNClusterParams struct {
|
||||
ID uint `uri:"id" binding:"required"`
|
||||
SchedulerClusterID uint `uri:"scheduler_cluster_id" binding:"required"`
|
||||
}
|
||||
|
||||
type CreateCDNClusterRequest struct {
|
||||
Name string `json:"name" binding:"required"`
|
||||
BIO string `json:"bio" binding:"omitempty"`
|
||||
Config *CDNClusterConfig `json:"config" binding:"required"`
|
||||
IsDefault bool `json:"is_default" binding:"omitempty"`
|
||||
}
|
||||
|
||||
type UpdateCDNClusterRequest struct {
|
||||
Name string `json:"name" binding:"omitempty"`
|
||||
BIO string `json:"bio" binding:"omitempty"`
|
||||
Config *CDNClusterConfig `json:"config" binding:"omitempty"`
|
||||
IsDefault bool `json:"is_default" binding:"omitempty"`
|
||||
}
|
||||
|
||||
type GetCDNClustersQuery struct {
|
||||
Name string `form:"name" binding:"omitempty"`
|
||||
Page int `form:"page" binding:"omitempty,gte=1"`
|
||||
PerPage int `form:"per_page" binding:"omitempty,gte=1,lte=50"`
|
||||
}
|
||||
|
||||
type CDNClusterConfig struct {
|
||||
LoadLimit uint32 `yaml:"loadLimit" mapstructure:"loadLimit" json:"load_limit" binding:"omitempty,gte=1,lte=5000"`
|
||||
NetTopology string `yaml:"netTopology" mapstructure:"netTopology" json:"net_topology"`
|
||||
}
|
||||
|
|
@ -23,7 +23,6 @@ type CreateJobRequest struct {
|
|||
Result map[string]interface{} `json:"result" binding:"omitempty"`
|
||||
UserID uint `json:"user_id" binding:"omitempty"`
|
||||
SeedPeerClusterIDs []uint `json:"seed_peer_cluster_ids" binding:"omitempty"`
|
||||
CDNClusterIDs []uint `json:"cdn_cluster_ids" binding:"omitempty"`
|
||||
SchedulerClusterIDs []uint `json:"scheduler_cluster_ids" binding:"omitempty"`
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -33,7 +33,6 @@ type CreateSchedulerClusterRequest struct {
|
|||
Scopes *SchedulerClusterScopes `json:"scopes" binding:"omitempty"`
|
||||
IsDefault bool `json:"is_default" binding:"omitempty"`
|
||||
SeedPeerClusterID uint `json:"seed_peer_cluster_id" binding:"omitempty"`
|
||||
CDNClusterID uint `json:"cdn_cluster_id" binding:"omitempty"`
|
||||
}
|
||||
|
||||
type UpdateSchedulerClusterRequest struct {
|
||||
|
|
@ -44,7 +43,6 @@ type UpdateSchedulerClusterRequest struct {
|
|||
Scopes *SchedulerClusterScopes `json:"scopes" binding:"omitempty"`
|
||||
IsDefault bool `json:"is_default" binding:"omitempty"`
|
||||
SeedPeerClusterID uint `json:"seed_peer_cluster_id" binding:"omitempty"`
|
||||
CDNClusterID uint `json:"cdn_cluster_id" binding:"omitempty"`
|
||||
}
|
||||
|
||||
type GetSchedulerClustersQuery struct {
|
||||
|
|
|
|||
|
|
@ -30,11 +30,6 @@ type AddSeedPeerClusterToSecurityGroupParams struct {
|
|||
SeedPeerClusterID uint `uri:"seed_peer_cluster_id" binding:"required"`
|
||||
}
|
||||
|
||||
type AddCDNClusterToSecurityGroupParams struct {
|
||||
ID uint `uri:"id" binding:"required"`
|
||||
CDNClusterID uint `uri:"cdn_cluster_id" binding:"required"`
|
||||
}
|
||||
|
||||
type AddSecurityRuleToSecurityGroupParams struct {
|
||||
ID uint `uri:"id" binding:"required"`
|
||||
SecurityRuleID uint `uri:"security_rule_id" binding:"required"`
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ type SeedPeerParams struct {
|
|||
|
||||
type CreateSeedPeerRequest struct {
|
||||
HostName string `json:"host_name" binding:"required"`
|
||||
Type string `json:"type" binding:"required"`
|
||||
Type string `json:"type" binding:"required,oneof=super strong weak"`
|
||||
IDC string `json:"idc" binding:"omitempty"`
|
||||
NetTopology string `json:"net_topology" binding:"omitempty"`
|
||||
Location string `json:"location" binding:"omitempty"`
|
||||
|
|
@ -33,7 +33,7 @@ type CreateSeedPeerRequest struct {
|
|||
}
|
||||
|
||||
type UpdateSeedPeerRequest struct {
|
||||
Type string `json:"type" binding:"omitempty"`
|
||||
Type string `json:"type" binding:"omitempty,oneof=super strong weak"`
|
||||
IDC string `json:"idc" binding:"omitempty"`
|
||||
NetTopology string `json:"net_topology" binding:"omitempty"`
|
||||
Location string `json:"location" binding:"omitempty"`
|
||||
|
|
@ -45,7 +45,7 @@ type UpdateSeedPeerRequest struct {
|
|||
|
||||
type GetSeedPeersQuery struct {
|
||||
HostName string `form:"host_name" binding:"omitempty"`
|
||||
Type string `form:"type" binding:"omitempty"`
|
||||
Type string `form:"type" binding:"omitempty,oneof=super strong weak"`
|
||||
IDC string `form:"idc" binding:"omitempty"`
|
||||
Location string `form:"location" binding:"omitempty"`
|
||||
IP string `form:"ip" binding:"omitempty"`
|
||||
|
|
|
|||
|
|
@ -24,10 +24,6 @@ func HostID(hostname string, port int32) string {
	return fmt.Sprintf("%s-%d", hostname, port)
}

func SeedHostID(hostname string, port int32) string {
	return fmt.Sprintf("%s_Seed", HostID(hostname, port))
}

func CDNHostID(hostname string, port int32) string {
	return fmt.Sprintf("%s_CDN", HostID(hostname, port))
}
|
||||
|
|
|
|||
|
|
@ -65,49 +65,6 @@ func TestHostID(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSeedHostID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hostname string
|
||||
port int32
|
||||
expect func(t *testing.T, d string)
|
||||
}{
|
||||
{
|
||||
name: "generate SeedHostID with ipv4",
|
||||
hostname: "foo",
|
||||
port: 8000,
|
||||
expect: func(t *testing.T, d string) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(d, "foo-8000_Seed")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "generate SeedHostID with empty host",
|
||||
hostname: "",
|
||||
port: 8000,
|
||||
expect: func(t *testing.T, d string) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(d, "-8000_Seed")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "generate SeedHostID with zero port",
|
||||
hostname: "foo",
|
||||
port: 0,
|
||||
expect: func(t *testing.T, d string) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(d, "foo-0_Seed")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tc.expect(t, SeedHostID(tc.hostname, tc.port))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCDNHostID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
|
|
|||
|
|
@ -113,8 +113,8 @@ type PieceSeed struct {
|
|||
|
||||
// peer id for cdn node, need suffix with _CDN
|
||||
PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
|
||||
// cdn node host uuid
|
||||
HostUuid string `protobuf:"bytes,3,opt,name=host_uuid,json=hostUuid,proto3" json:"host_uuid,omitempty"`
|
||||
// cdn host id
|
||||
HostId string `protobuf:"bytes,3,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"`
|
||||
PieceInfo *base.PieceInfo `protobuf:"bytes,4,opt,name=piece_info,json=pieceInfo,proto3" json:"piece_info,omitempty"`
|
||||
// whether or not all seeds are downloaded
|
||||
Done bool `protobuf:"varint,5,opt,name=done,proto3" json:"done,omitempty"`
|
||||
|
|
@ -167,9 +167,9 @@ func (x *PieceSeed) GetPeerId() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (x *PieceSeed) GetHostUuid() string {
|
||||
func (x *PieceSeed) GetHostId() string {
|
||||
if x != nil {
|
||||
return x.HostUuid
|
||||
return x.HostId
|
||||
}
|
||||
return ""
|
||||
}
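
With host_uuid renamed to host_id, a seeding peer is expected to fill PieceSeed.HostId with the seed host ID scheme shown in the idgen changes above rather than a UUID. A hedged sketch of producing such a message; the idgen import path and the literal values are assumptions:

```go
// Sketch only: assumes idgen lives under pkg/idgen and exposes SeedHostID as shown above.
package main

import (
	"fmt"

	"d7y.io/dragonfly/v2/pkg/idgen"
	"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
)

func main() {
	seed := &cdnsystem.PieceSeed{
		PeerId: "example-peer-id",
		// HostId replaces the old HostUuid field and reuses the host ID scheme,
		// e.g. "seed-host-8001_Seed" for hostname "seed-host" and port 8001.
		HostId: idgen.SeedHostID("seed-host", 8001),
		Done:   false,
	}
	fmt.Println(seed.GetHostId())
}
```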
|
||||
|
|
@ -232,41 +232,41 @@ var file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc = []byte{
|
|||
0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, 0x01, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x28, 0x0a,
|
||||
0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07,
|
||||
0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xa4, 0x02, 0x0a, 0x09, 0x50, 0x69, 0x65, 0x63,
|
||||
0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xa0, 0x02, 0x0a, 0x09, 0x50, 0x69, 0x65, 0x63,
|
||||
0x65, 0x53, 0x65, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52,
|
||||
0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f,
|
||||
0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
|
||||
0x02, 0x10, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x55, 0x75, 0x69, 0x64, 0x12, 0x2e, 0x0a,
|
||||
0x0a, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x0f, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e,
|
||||
0x66, 0x6f, 0x52, 0x09, 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e,
|
||||
0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e,
|
||||
0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65,
|
||||
0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61,
|
||||
0x6c, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20,
|
||||
0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43,
|
||||
0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54,
|
||||
0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
|
||||
0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x32, 0xc4,
|
||||
0x01, 0x0a, 0x06, 0x53, 0x65, 0x65, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x4f, 0x62, 0x74,
|
||||
0x61, 0x69, 0x6e, 0x53, 0x65, 0x65, 0x64, 0x73, 0x12, 0x16, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79,
|
||||
0x73, 0x74, 0x65, 0x6d, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x14, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x50, 0x69, 0x65,
|
||||
0x63, 0x65, 0x53, 0x65, 0x65, 0x64, 0x30, 0x01, 0x12, 0x3a, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50,
|
||||
0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65,
|
||||
0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x11, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61,
|
||||
0x63, 0x6b, 0x65, 0x74, 0x12, 0x3f, 0x0a, 0x0e, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x69, 0x65, 0x63,
|
||||
0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69,
|
||||
0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11,
|
||||
0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65,
|
||||
0x74, 0x28, 0x01, 0x30, 0x01, 0x42, 0x27, 0x5a, 0x25, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f,
|
||||
0x64, 0x72, 0x61, 0x67, 0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67,
|
||||
0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x62, 0x06,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f,
|
||||
0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
|
||||
0x01, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x70, 0x69, 0x65,
|
||||
0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
|
||||
0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09,
|
||||
0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e,
|
||||
0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x25, 0x0a,
|
||||
0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18,
|
||||
0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65,
|
||||
0x6e, 0x67, 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69,
|
||||
0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52,
|
||||
0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
|
||||
0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08,
|
||||
0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12,
|
||||
0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x32, 0xc4, 0x01, 0x0a, 0x06, 0x53,
|
||||
0x65, 0x65, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x4f, 0x62, 0x74, 0x61, 0x69, 0x6e, 0x53,
|
||||
0x65, 0x65, 0x64, 0x73, 0x12, 0x16, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d,
|
||||
0x2e, 0x53, 0x65, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x63,
|
||||
0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x53, 0x65,
|
||||
0x65, 0x64, 0x30, 0x01, 0x12, 0x3a, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x69, 0x65, 0x63, 0x65,
|
||||
0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65,
|
||||
0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e,
|
||||
0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74,
|
||||
0x12, 0x3f, 0x0a, 0x0e, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73,
|
||||
0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54,
|
||||
0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x62, 0x61, 0x73,
|
||||
0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x28, 0x01, 0x30,
|
||||
0x01, 0x42, 0x27, 0x5a, 0x25, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x72, 0x61, 0x67,
|
||||
0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63,
|
||||
0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
|
|
|||
|
|
@ -142,9 +142,9 @@ func (m *PieceSeed) Validate() error {
|
|||
}
|
||||
}
|
||||
|
||||
if utf8.RuneCountInString(m.GetHostUuid()) < 1 {
|
||||
if utf8.RuneCountInString(m.GetHostId()) < 1 {
|
||||
return PieceSeedValidationError{
|
||||
field: "HostUuid",
|
||||
field: "HostId",
|
||||
reason: "value length must be at least 1 runes",
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -34,8 +34,8 @@ message SeedRequest{
|
|||
message PieceSeed{
|
||||
// peer id for cdn node, need suffix with _CDN
|
||||
string peer_id = 2 [(validate.rules).string.min_len = 1];
|
||||
// cdn node host uuid
|
||||
string host_uuid = 3 [(validate.rules).string.min_len = 1];
|
||||
// cdn host id
|
||||
string host_id = 3 [(validate.rules).string.min_len = 1];
|
||||
base.PieceInfo piece_info = 4;
|
||||
|
||||
// whether or not all seeds are downloaded
|
||||
|
|
|
|||
|
|
@ -31,17 +31,13 @@ import (
|
|||
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
|
||||
)
|
||||
|
||||
func GetClientByAddr(addrs []dfnet.NetAddr, opts ...grpc.DialOption) (CdnClient, error) {
	if len(addrs) == 0 {
		return nil, errors.New("address list of cdn is empty")
	}
	cc := &cdnClient{
func GetClientByAddr(addrs []dfnet.NetAddr, opts ...grpc.DialOption) CdnClient {
	return &cdnClient{
		rpc.NewConnection(context.Background(), "cdn", addrs, []rpc.ConnOption{
			rpc.WithConnExpireTime(60 * time.Second),
			rpc.WithDialOption(opts),
		}),
	}
	return cc, nil
}
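
GetClientByAddr now returns CdnClient directly instead of (CdnClient, error), so callers lose the empty-address error branch and failures surface on the first RPC instead. A hedged sketch of the adjusted call site; the client package path is an assumption:

```go
// Sketch only: illustrative call site, the import path for the client package is assumed.
package daemon

import (
	"d7y.io/dragonfly/v2/pkg/dfnet"
	cdnclient "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client"
)

func newSeedClient(addrs []dfnet.NetAddr) cdnclient.CdnClient {
	// Before this change: client, err := cdnclient.GetClientByAddr(addrs), plus an
	// explicit error branch for an empty address list.
	// After: construction cannot fail; connection problems show up on the first RPC.
	return cdnclient.GetClientByAddr(addrs)
}
```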
|
||||
|
||||
var once sync.Once
|
||||
|
|
|
|||
|
|
@ -28,7 +28,9 @@ import (
|
|||
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
logger "d7y.io/dragonfly/v2/internal/dflog"
|
||||
"d7y.io/dragonfly/v2/pkg/dfnet"
|
||||
|
|
@ -48,9 +50,6 @@ type Client interface {
|
|||
// Update Seed peer configuration.
|
||||
UpdateSeedPeer(*manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error)
|
||||
|
||||
// Update CDN configuration.
|
||||
UpdateCDN(*manager.UpdateCDNRequest) (*manager.CDN, error)
|
||||
|
||||
// Get Scheduler and Scheduler cluster configuration.
|
||||
GetScheduler(*manager.GetSchedulerRequest) (*manager.Scheduler, error)
|
||||
|
||||
|
|
@ -120,13 +119,6 @@ func (c *client) UpdateSeedPeer(req *manager.UpdateSeedPeerRequest) (*manager.Se
|
|||
return c.ManagerClient.UpdateSeedPeer(ctx, req)
|
||||
}
|
||||
|
||||
func (c *client) UpdateCDN(req *manager.UpdateCDNRequest) (*manager.CDN, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
|
||||
defer cancel()
|
||||
|
||||
return c.ManagerClient.UpdateCDN(ctx, req)
|
||||
}
|
||||
|
||||
func (c *client) GetScheduler(req *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
|
||||
defer cancel()
|
||||
|
|
@ -153,6 +145,12 @@ retry:
|
|||
ctx, cancel := context.WithCancel(context.Background())
|
||||
stream, err := c.ManagerClient.KeepAlive(ctx)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
logger.Infof("hostname %s cluster id %d stop keepalive", keepalive.HostName, keepalive.ClusterId)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(interval)
|
||||
cancel()
|
||||
goto retry
|
||||
|
|
@ -168,7 +166,7 @@ retry:
|
|||
ClusterId: keepalive.ClusterId,
|
||||
}); err != nil {
|
||||
if _, err := stream.CloseAndRecv(); err != nil {
|
||||
logger.Errorf("hostname %s cluster id %v close and recv stream failed: %v", keepalive.HostName, keepalive.ClusterId, err)
|
||||
logger.Errorf("hostname %s cluster id %d close and recv stream failed: %v", keepalive.HostName, keepalive.ClusterId, err)
|
||||
}
|
||||
|
||||
cancel()
|
||||
|
|
|
|||
|
|
@ -91,21 +91,6 @@ func (mr *MockClientMockRecorder) ListSchedulers(arg0 interface{}) *gomock.Call
|
|||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockClient)(nil).ListSchedulers), arg0)
|
||||
}
|
||||
|
||||
// UpdateCDN mocks base method.
|
||||
func (m *MockClient) UpdateCDN(arg0 *manager.UpdateCDNRequest) (*manager.CDN, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdateCDN", arg0)
|
||||
ret0, _ := ret[0].(*manager.CDN)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// UpdateCDN indicates an expected call of UpdateCDN.
|
||||
func (mr *MockClientMockRecorder) UpdateCDN(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockClient)(nil).UpdateCDN), arg0)
|
||||
}
|
||||
|
||||
// UpdateScheduler mocks base method.
|
||||
func (m *MockClient) UpdateScheduler(arg0 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -108,472 +108,6 @@ var _ interface {
|
|||
ErrorName() string
|
||||
} = SecurityGroupValidationError{}
|
||||
|
||||
// Validate checks the field values on CDNCluster with the rules defined in the
|
||||
// proto definition for this message. If any rules are violated, an error is returned.
|
||||
func (m *CDNCluster) Validate() error {
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// no validation rules for Id
|
||||
|
||||
// no validation rules for Name
|
||||
|
||||
// no validation rules for Bio
|
||||
|
||||
// no validation rules for Config
|
||||
|
||||
if v, ok := interface{}(m.GetSecurityGroup()).(interface{ Validate() error }); ok {
|
||||
if err := v.Validate(); err != nil {
|
||||
return CDNClusterValidationError{
|
||||
field: "SecurityGroup",
|
||||
reason: "embedded message failed validation",
|
||||
cause: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CDNClusterValidationError is the validation error returned by
|
||||
// CDNCluster.Validate if the designated constraints aren't met.
|
||||
type CDNClusterValidationError struct {
|
||||
field string
|
||||
reason string
|
||||
cause error
|
||||
key bool
|
||||
}
|
||||
|
||||
// Field function returns field value.
|
||||
func (e CDNClusterValidationError) Field() string { return e.field }
|
||||
|
||||
// Reason function returns reason value.
|
||||
func (e CDNClusterValidationError) Reason() string { return e.reason }
|
||||
|
||||
// Cause function returns cause value.
|
||||
func (e CDNClusterValidationError) Cause() error { return e.cause }
|
||||
|
||||
// Key function returns key value.
|
||||
func (e CDNClusterValidationError) Key() bool { return e.key }
|
||||
|
||||
// ErrorName returns error name.
|
||||
func (e CDNClusterValidationError) ErrorName() string { return "CDNClusterValidationError" }
|
||||
|
||||
// Error satisfies the builtin error interface
|
||||
func (e CDNClusterValidationError) Error() string {
|
||||
cause := ""
|
||||
if e.cause != nil {
|
||||
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
||||
}
|
||||
|
||||
key := ""
|
||||
if e.key {
|
||||
key = "key for "
|
||||
}
|
||||
|
||||
return fmt.Sprintf(
|
||||
"invalid %sCDNCluster.%s: %s%s",
|
||||
key,
|
||||
e.field,
|
||||
e.reason,
|
||||
cause)
|
||||
}
|
||||
|
||||
var _ error = CDNClusterValidationError{}
|
||||
|
||||
var _ interface {
|
||||
Field() string
|
||||
Reason() string
|
||||
Key() bool
|
||||
Cause() error
|
||||
ErrorName() string
|
||||
} = CDNClusterValidationError{}
|
||||
|
||||
// Validate checks the field values on CDN with the rules defined in the proto
|
||||
// definition for this message. If any rules are violated, an error is returned.
|
||||
func (m *CDN) Validate() error {
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// no validation rules for Id
|
||||
|
||||
// no validation rules for HostName
|
||||
|
||||
// no validation rules for Idc
|
||||
|
||||
// no validation rules for Location
|
||||
|
||||
// no validation rules for Ip
|
||||
|
||||
// no validation rules for Port
|
||||
|
||||
// no validation rules for DownloadPort
|
||||
|
||||
// no validation rules for State
|
||||
|
||||
// no validation rules for CdnClusterId
|
||||
|
||||
if v, ok := interface{}(m.GetCdnCluster()).(interface{ Validate() error }); ok {
|
||||
if err := v.Validate(); err != nil {
|
||||
return CDNValidationError{
|
||||
field: "CdnCluster",
|
||||
reason: "embedded message failed validation",
|
||||
cause: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for idx, item := range m.GetSchedulers() {
|
||||
_, _ = idx, item
|
||||
|
||||
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
|
||||
if err := v.Validate(); err != nil {
|
||||
return CDNValidationError{
|
||||
field: fmt.Sprintf("Schedulers[%v]", idx),
|
||||
reason: "embedded message failed validation",
|
||||
cause: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CDNValidationError is the validation error returned by CDN.Validate if the
|
||||
// designated constraints aren't met.
|
||||
type CDNValidationError struct {
|
||||
field string
|
||||
reason string
|
||||
cause error
|
||||
key bool
|
||||
}
|
||||
|
||||
// Field function returns field value.
|
||||
func (e CDNValidationError) Field() string { return e.field }
|
||||
|
||||
// Reason function returns reason value.
|
||||
func (e CDNValidationError) Reason() string { return e.reason }
|
||||
|
||||
// Cause function returns cause value.
|
||||
func (e CDNValidationError) Cause() error { return e.cause }
|
||||
|
||||
// Key function returns key value.
|
||||
func (e CDNValidationError) Key() bool { return e.key }
|
||||
|
||||
// ErrorName returns error name.
|
||||
func (e CDNValidationError) ErrorName() string { return "CDNValidationError" }
|
||||
|
||||
// Error satisfies the builtin error interface
|
||||
func (e CDNValidationError) Error() string {
|
||||
cause := ""
|
||||
if e.cause != nil {
|
||||
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
||||
}
|
||||
|
||||
key := ""
|
||||
if e.key {
|
||||
key = "key for "
|
||||
}
|
||||
|
||||
return fmt.Sprintf(
|
||||
"invalid %sCDN.%s: %s%s",
|
||||
key,
|
||||
e.field,
|
||||
e.reason,
|
||||
cause)
|
||||
}
|
||||
|
||||
var _ error = CDNValidationError{}
|
||||
|
||||
var _ interface {
|
||||
Field() string
|
||||
Reason() string
|
||||
Key() bool
|
||||
Cause() error
|
||||
ErrorName() string
|
||||
} = CDNValidationError{}
|
||||
|
||||
// Validate checks the field values on GetCDNRequest with the rules defined in
|
||||
// the proto definition for this message. If any rules are violated, an error
|
||||
// is returned.
|
||||
func (m *GetCDNRequest) Validate() error {
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok {
|
||||
return GetCDNRequestValidationError{
|
||||
field: "SourceType",
|
||||
reason: "value must be one of the defined enum values",
|
||||
}
|
||||
}
|
||||
|
||||
if err := m._validateHostname(m.GetHostName()); err != nil {
|
||||
return GetCDNRequestValidationError{
|
||||
field: "HostName",
|
||||
reason: "value must be a valid hostname",
|
||||
cause: err,
|
||||
}
|
||||
}
|
||||
|
||||
if m.GetCdnClusterId() < 1 {
|
||||
return GetCDNRequestValidationError{
|
||||
field: "CdnClusterId",
|
||||
reason: "value must be greater than or equal to 1",
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GetCDNRequest) _validateHostname(host string) error {
|
||||
s := strings.ToLower(strings.TrimSuffix(host, "."))
|
||||
|
||||
if len(host) > 253 {
|
||||
return errors.New("hostname cannot exceed 253 characters")
|
||||
}
|
||||
|
||||
for _, part := range strings.Split(s, ".") {
|
||||
if l := len(part); l == 0 || l > 63 {
|
||||
return errors.New("hostname part must be non-empty and cannot exceed 63 characters")
|
||||
}
|
||||
|
||||
if part[0] == '-' {
|
||||
return errors.New("hostname parts cannot begin with hyphens")
|
||||
}
|
||||
|
||||
if part[len(part)-1] == '-' {
|
||||
return errors.New("hostname parts cannot end with hyphens")
|
||||
}
|
||||
|
||||
for _, r := range part {
|
||||
if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' {
|
||||
return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
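
These CDN request validators are deleted together with the CDN RPCs; before this change they were exercised like the sketch below, and the surviving seed peer requests keep the same Validate-before-use pattern. Field values are illustrative:

```go
// Sketch only: demonstrates the generated validation rules of the removed message.
package main

import (
	"fmt"

	"d7y.io/dragonfly/v2/pkg/rpc/manager"
)

func main() {
	req := &manager.GetCDNRequest{
		SourceType:   manager.SourceType_CDN_SOURCE,
		HostName:     "cdn-0.example.com",
		CdnClusterId: 1,
	}
	// Passes: defined enum value, valid hostname, cluster id >= 1.
	fmt.Println(req.Validate())

	req.CdnClusterId = 0
	// Fails: CdnClusterId must be greater than or equal to 1.
	fmt.Println(req.Validate())
}
```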
|
||||
|
||||
// GetCDNRequestValidationError is the validation error returned by
|
||||
// GetCDNRequest.Validate if the designated constraints aren't met.
|
||||
type GetCDNRequestValidationError struct {
|
||||
field string
|
||||
reason string
|
||||
cause error
|
||||
key bool
|
||||
}
|
||||
|
||||
// Field function returns field value.
|
||||
func (e GetCDNRequestValidationError) Field() string { return e.field }
|
||||
|
||||
// Reason function returns reason value.
|
||||
func (e GetCDNRequestValidationError) Reason() string { return e.reason }
|
||||
|
||||
// Cause function returns cause value.
|
||||
func (e GetCDNRequestValidationError) Cause() error { return e.cause }
|
||||
|
||||
// Key function returns key value.
|
||||
func (e GetCDNRequestValidationError) Key() bool { return e.key }
|
||||
|
||||
// ErrorName returns error name.
|
||||
func (e GetCDNRequestValidationError) ErrorName() string { return "GetCDNRequestValidationError" }
|
||||
|
||||
// Error satisfies the builtin error interface
|
||||
func (e GetCDNRequestValidationError) Error() string {
|
||||
cause := ""
|
||||
if e.cause != nil {
|
||||
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
||||
}
|
||||
|
||||
key := ""
|
||||
if e.key {
|
||||
key = "key for "
|
||||
}
|
||||
|
||||
return fmt.Sprintf(
|
||||
"invalid %sGetCDNRequest.%s: %s%s",
|
||||
key,
|
||||
e.field,
|
||||
e.reason,
|
||||
cause)
|
||||
}
|
||||
|
||||
var _ error = GetCDNRequestValidationError{}
|
||||
|
||||
var _ interface {
|
||||
Field() string
|
||||
Reason() string
|
||||
Key() bool
|
||||
Cause() error
|
||||
ErrorName() string
|
||||
} = GetCDNRequestValidationError{}
|
||||
|
||||
// Validate checks the field values on UpdateCDNRequest with the rules defined
|
||||
// in the proto definition for this message. If any rules are violated, an
|
||||
// error is returned.
|
||||
func (m *UpdateCDNRequest) Validate() error {
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok {
|
||||
return UpdateCDNRequestValidationError{
|
||||
field: "SourceType",
|
||||
reason: "value must be one of the defined enum values",
|
||||
}
|
||||
}
|
||||
|
||||
if err := m._validateHostname(m.GetHostName()); err != nil {
|
||||
return UpdateCDNRequestValidationError{
|
||||
field: "HostName",
|
||||
reason: "value must be a valid hostname",
|
||||
cause: err,
|
||||
}
|
||||
}
|
||||
|
||||
if m.GetIdc() != "" {
|
||||
|
||||
if l := utf8.RuneCountInString(m.GetIdc()); l < 1 || l > 1024 {
|
||||
return UpdateCDNRequestValidationError{
|
||||
field: "Idc",
|
||||
reason: "value length must be between 1 and 1024 runes, inclusive",
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if m.GetLocation() != "" {
|
||||
|
||||
if utf8.RuneCountInString(m.GetLocation()) > 1024 {
|
||||
return UpdateCDNRequestValidationError{
|
||||
field: "Location",
|
||||
reason: "value length must be at most 1024 runes",
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if ip := net.ParseIP(m.GetIp()); ip == nil {
|
||||
return UpdateCDNRequestValidationError{
|
||||
field: "Ip",
|
||||
reason: "value must be a valid IP address",
|
||||
}
|
||||
}
|
||||
|
||||
if val := m.GetPort(); val < 1024 || val >= 65535 {
|
||||
return UpdateCDNRequestValidationError{
|
||||
field: "Port",
|
||||
reason: "value must be inside range [1024, 65535)",
|
||||
}
|
||||
}
|
||||
|
||||
if val := m.GetDownloadPort(); val < 1024 || val >= 65535 {
|
||||
return UpdateCDNRequestValidationError{
|
||||
field: "DownloadPort",
|
||||
reason: "value must be inside range [1024, 65535)",
|
||||
}
|
||||
}
|
||||
|
||||
if m.GetCdnClusterId() < 1 {
|
||||
return UpdateCDNRequestValidationError{
|
||||
field: "CdnClusterId",
|
||||
reason: "value must be greater than or equal to 1",
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UpdateCDNRequest) _validateHostname(host string) error {
|
||||
s := strings.ToLower(strings.TrimSuffix(host, "."))
|
||||
|
||||
if len(host) > 253 {
|
||||
return errors.New("hostname cannot exceed 253 characters")
|
||||
}
|
||||
|
||||
for _, part := range strings.Split(s, ".") {
|
||||
if l := len(part); l == 0 || l > 63 {
|
||||
return errors.New("hostname part must be non-empty and cannot exceed 63 characters")
|
||||
}
|
||||
|
||||
if part[0] == '-' {
|
||||
return errors.New("hostname parts cannot begin with hyphens")
|
||||
}
|
||||
|
||||
if part[len(part)-1] == '-' {
|
||||
return errors.New("hostname parts cannot end with hyphens")
|
||||
}
|
||||
|
||||
for _, r := range part {
|
||||
if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' {
|
||||
return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateCDNRequestValidationError is the validation error returned by
|
||||
// UpdateCDNRequest.Validate if the designated constraints aren't met.
|
||||
type UpdateCDNRequestValidationError struct {
|
||||
field string
|
||||
reason string
|
||||
cause error
|
||||
key bool
|
||||
}
|
||||
|
||||
// Field function returns field value.
|
||||
func (e UpdateCDNRequestValidationError) Field() string { return e.field }
|
||||
|
||||
// Reason function returns reason value.
|
||||
func (e UpdateCDNRequestValidationError) Reason() string { return e.reason }
|
||||
|
||||
// Cause function returns cause value.
|
||||
func (e UpdateCDNRequestValidationError) Cause() error { return e.cause }
|
||||
|
||||
// Key function returns key value.
|
||||
func (e UpdateCDNRequestValidationError) Key() bool { return e.key }
|
||||
|
||||
// ErrorName returns error name.
|
||||
func (e UpdateCDNRequestValidationError) ErrorName() string { return "UpdateCDNRequestValidationError" }
|
||||
|
||||
// Error satisfies the builtin error interface
|
||||
func (e UpdateCDNRequestValidationError) Error() string {
|
||||
cause := ""
|
||||
if e.cause != nil {
|
||||
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
||||
}
|
||||
|
||||
key := ""
|
||||
if e.key {
|
||||
key = "key for "
|
||||
}
|
||||
|
||||
return fmt.Sprintf(
|
||||
"invalid %sUpdateCDNRequest.%s: %s%s",
|
||||
key,
|
||||
e.field,
|
||||
e.reason,
|
||||
cause)
|
||||
}
|
||||
|
||||
var _ error = UpdateCDNRequestValidationError{}
|
||||
|
||||
var _ interface {
|
||||
Field() string
|
||||
Reason() string
|
||||
Key() bool
|
||||
Cause() error
|
||||
ErrorName() string
|
||||
} = UpdateCDNRequestValidationError{}
|
||||
|
||||
// Validate checks the field values on SeedPeerCluster with the rules defined
|
||||
// in the proto definition for this message. If any rules are violated, an
|
||||
// error is returned.
|
||||
|
|
@ -672,6 +206,8 @@ func (m *SeedPeer) Validate() error {
|
|||
|
||||
// no validation rules for Type
|
||||
|
||||
// no validation rules for IsCdn
|
||||
|
||||
// no validation rules for Idc
|
||||
|
||||
// no validation rules for NetTopology
|
||||
|
|
@ -912,13 +448,15 @@ func (m *UpdateSeedPeerRequest) Validate() error {
|
|||
}
|
||||
}
|
||||
|
||||
if l := utf8.RuneCountInString(m.GetType()); l < 1 || l > 1024 {
|
||||
if _, ok := _UpdateSeedPeerRequest_Type_InLookup[m.GetType()]; !ok {
|
||||
return UpdateSeedPeerRequestValidationError{
|
||||
field: "Type",
|
||||
reason: "value length must be between 1 and 1024 runes, inclusive",
|
||||
reason: "value must be in list [super strong weak]",
|
||||
}
|
||||
}
|
||||
|
||||
// no validation rules for IsCdn
|
||||
|
||||
if m.GetIdc() != "" {
|
||||
|
||||
if l := utf8.RuneCountInString(m.GetIdc()); l < 1 || l > 1024 {
|
||||
|
|
@ -1069,6 +607,12 @@ var _ interface {
|
|||
ErrorName() string
|
||||
} = UpdateSeedPeerRequestValidationError{}
|
||||
|
||||
var _UpdateSeedPeerRequest_Type_InLookup = map[string]struct{}{
	"super":  {},
	"strong": {},
	"weak":   {},
}
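
UpdateSeedPeerRequest.Type is now checked against the fixed set super, strong, weak instead of a 1 to 1024 rune length rule. A hedged sketch of the effect; the other field values are illustrative:

```go
// Sketch only: illustrates the generated in-list rule for the seed peer type.
package main

import (
	"fmt"

	"d7y.io/dragonfly/v2/pkg/rpc/manager"
)

func main() {
	req := &manager.UpdateSeedPeerRequest{
		SourceType:        manager.SourceType_SEED_PEER_SOURCE,
		HostName:          "seed-0.example.com",
		Type:              "super",
		Idc:               "idc-1",
		Ip:                "192.0.2.10",
		Port:              8002,
		DownloadPort:      8001,
		SeedPeerClusterId: 1,
	}
	fmt.Println(req.Validate()) // <nil>: "super" is in [super strong weak]

	req.Type = "cdn"
	fmt.Println(req.Validate()) // error: value must be in list [super strong weak]
}
```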
|
||||
|
||||
// Validate checks the field values on SchedulerCluster with the rules defined
|
||||
// in the proto definition for this message. If any rules are violated, an
|
||||
// error is returned.
|
||||
|
|
@ -1193,21 +737,6 @@ func (m *Scheduler) Validate() error {
|
|||
}
|
||||
}
|
||||
|
||||
for idx, item := range m.GetCdns() {
|
||||
_, _ = idx, item
|
||||
|
||||
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
|
||||
if err := v.Validate(); err != nil {
|
||||
return SchedulerValidationError{
|
||||
field: fmt.Sprintf("Cdns[%v]", idx),
|
||||
reason: "embedded message failed validation",
|
||||
cause: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for idx, item := range m.GetSeedPeers() {
|
||||
_, _ = idx, item
|
||||
|
||||
|
|
|
|||
|
|
@ -27,12 +27,10 @@ option go_package = "d7y.io/dragonfly/v2/pkg/rpc/manager";
|
|||
enum SourceType {
  // Scheduler service.
  SCHEDULER_SOURCE = 0;
  // Dfdaemon service.
  CLIENT_SOURCE = 1;
  // Deprecated: Use SuperSeed type of SeedPeer instead.
  CDN_SOURCE = 2;
  // Peer service.
  PEER_SOURCE = 1;
  // SeedPeer service.
  SEED_PEER_SOURCE = 3;
  SEED_PEER_SOURCE = 2;
}
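
Renumbering SourceType (PEER_SOURCE takes 1, SEED_PEER_SOURCE moves from 3 to 2, CDN_SOURCE disappears) is wire-breaking for any component still sending the old values, which lines up with dropping CDN compatibility in this commit. A sketch of the Go constants protoc-gen-go is expected to emit for the new enum; this is a reading of the proto, not code from the diff:

```go
// Sketch only: the shape protoc-gen-go is expected to emit for the renumbered enum.
package manager

type SourceType int32

const (
	SourceType_SCHEDULER_SOURCE SourceType = 0 // Scheduler service.
	SourceType_PEER_SOURCE      SourceType = 1 // Peer (dfdaemon) service, was CLIENT_SOURCE = 1.
	SourceType_SEED_PEER_SOURCE SourceType = 2 // Seed peer service, was 3; 2 was CDN_SOURCE.
)
```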
|
||||
|
||||
// SecurityGroup represents security group of cluster.
|
||||
|
|
@ -49,76 +47,6 @@ message SecurityGroup {
|
|||
string proxy_domain = 5;
|
||||
}
|
||||
|
||||
// Deprecated: Use SeedPeerCluster instead.
|
||||
message CDNCluster {
|
||||
// Cluster id.
|
||||
uint64 id = 1;
|
||||
// Cluster name.
|
||||
string name = 2;
|
||||
// Cluster biography.
|
||||
string bio = 3;
|
||||
// Cluster configuration.
|
||||
bytes config = 4;
|
||||
// Security group to which the cdn cluster belongs.
|
||||
SecurityGroup security_group = 6;
|
||||
}
|
||||
|
||||
// Deprecated: Use SuperSeed type of SeedPeer instead.
|
||||
message CDN {
|
||||
// CDN id.
|
||||
uint64 id = 1;
|
||||
// CDN hostname.
|
||||
string host_name = 2;
|
||||
// CDN idc.
|
||||
string idc = 3;
|
||||
// CDN location.
|
||||
string location = 4;
|
||||
// CDN ip.
|
||||
string ip = 5;
|
||||
// CDN grpc port.
|
||||
int32 port = 6;
|
||||
// CDN download port.
|
||||
int32 download_port = 7;
|
||||
// CDN state.
|
||||
string state = 8;
|
||||
// ID of the cluster to which the cdn belongs.
|
||||
uint64 cdn_cluster_id = 9;
|
||||
// Cluster to which the cdn belongs.
|
||||
CDNCluster cdn_cluster = 10;
|
||||
// Schedulers included in cdn.
|
||||
repeated Scheduler schedulers = 11;
|
||||
}
|
||||
|
||||
// Deprecated: Use GetSeedPeerRequest instead.
|
||||
message GetCDNRequest {
|
||||
// Request source type.
|
||||
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
|
||||
// CDN hostname.
|
||||
string host_name = 2 [(validate.rules).string.hostname = true];
|
||||
// ID of the cluster to which the cdn belongs.
|
||||
uint64 cdn_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
|
||||
}
|
||||
|
||||
// Deprecated: Use UpdateSeedPeerRequest instead.
|
||||
message UpdateCDNRequest {
|
||||
// Request source type.
|
||||
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
|
||||
// CDN hostname.
|
||||
string host_name = 2 [(validate.rules).string.hostname = true];
|
||||
// CDN idc.
|
||||
string idc = 3 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
|
||||
// CDN location.
|
||||
string location = 4 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
|
||||
// CDN ip.
|
||||
string ip = 5 [(validate.rules).string = {ip: true}];
|
||||
// CDN grpc port.
|
||||
int32 port = 6 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
|
||||
// CDN download port.
|
||||
int32 download_port = 7 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
|
||||
// ID of the cluster to which the cdn belongs.
|
||||
uint64 cdn_cluster_id = 8 [(validate.rules).uint64 = {gte: 1}];
|
||||
}
|
||||
|
||||
// SeedPeerCluster represents cluster of seed peer.
|
||||
message SeedPeerCluster {
|
||||
// Cluster id.
|
||||
|
|
@ -143,26 +71,28 @@ message SeedPeer {
|
|||
string host_name = 2;
|
||||
// Seed peer type.
|
||||
string type = 3;
|
||||
// CDN seed peer.
|
||||
bool is_cdn = 4;
|
||||
// Seed peer idc.
|
||||
string idc = 4;
|
||||
string idc = 5;
|
||||
// Seed peer network topology.
|
||||
string net_topology = 5;
|
||||
string net_topology = 6;
|
||||
// Seed peer location.
|
||||
string location = 6;
|
||||
string location = 7;
|
||||
// Seed peer ip.
|
||||
string ip = 7;
|
||||
string ip = 8;
|
||||
// Seed peer grpc port.
|
||||
int32 port = 8;
|
||||
int32 port = 9;
|
||||
// Seed peer download port.
|
||||
int32 download_port = 9;
|
||||
int32 download_port = 10;
|
||||
// Seed peer state.
|
||||
string state = 10;
|
||||
string state = 11;
|
||||
// ID of the cluster to which the seed peer belongs.
|
||||
uint64 seed_peer_cluster_id = 11;
|
||||
uint64 seed_peer_cluster_id = 12;
|
||||
// Cluster to which the seed peer belongs.
|
||||
SeedPeerCluster seed_peer_cluster = 12;
|
||||
SeedPeerCluster seed_peer_cluster = 13;
|
||||
// Schedulers included in seed peer.
|
||||
repeated Scheduler schedulers = 13;
|
||||
repeated Scheduler schedulers = 14;
|
||||
}
|
||||
|
||||
// GetSeedPeerRequest represents request of GetSeedPeer.
|
||||
|
|
@@ -182,21 +112,23 @@ message UpdateSeedPeerRequest {
  // Seed peer hostname.
  string host_name = 2 [(validate.rules).string.hostname = true];
  // Seed peer type.
  string type = 3 [(validate.rules).string = {min_len: 1, max_len: 1024}];
  string type = 3 [(validate.rules).string = {in: ["super", "strong", "weak"]}];
  // CDN seed peer.
  bool is_cdn = 4;
  // Seed peer idc.
  string idc = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
  string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
  // Seed peer network topology.
  string net_topology = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
  string net_topology = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
  // Seed peer location.
  string location = 6 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
  string location = 7 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
  // Seed peer ip.
  string ip = 7 [(validate.rules).string = {ip: true}];
  string ip = 8 [(validate.rules).string = {ip: true}];
  // Seed peer port.
  int32 port = 8 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
  int32 port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
  // Seed peer download port.
  int32 download_port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
  int32 download_port = 10 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
  // ID of the cluster to which the seed peer belongs.
  uint64 seed_peer_cluster_id = 10 [(validate.rules).uint64 = {gte: 1}];
  uint64 seed_peer_cluster_id = 11 [(validate.rules).uint64 = {gte: 1}];
}

// SeedPeerCluster represents cluster of scheduler.
@@ -241,8 +173,6 @@ message Scheduler {
  uint64 scheduler_cluster_id = 10;
  // Cluster to which the scheduler belongs.
  SchedulerCluster scheduler_cluster = 11;
  // Deprecated: Use seed_peers instead.
  repeated CDN cdns = 12;
  // Seed peers to which the scheduler belongs.
  repeated SeedPeer seed_peers = 13;
  // Scheduler network topology.
@@ -317,10 +247,6 @@ service Manager {
  rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer);
  // Update SeedPeer configuration.
  rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer);
  // Deprecated: Use GetSeedPeer instead.
  rpc GetCDN(GetCDNRequest) returns(CDN);
  // Deprecated: Use UpdateSeedPeer instead.
  rpc UpdateCDN(UpdateCDNRequest) returns(CDN);
  // Get Scheduler and Scheduler cluster configuration.
  rpc GetScheduler(GetSchedulerRequest)returns(Scheduler);
  // Update scheduler configuration.

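A hedged sketch (not part of the diff) of a seed peer announcing itself through the new manager RPC in place of the deprecated UpdateCDN. The request fields come from the hunk above; any fields not shown there (for example the request's first field) are omitted here, and the client wiring is assumed.

package main

import (
	"context"

	"d7y.io/dragonfly/v2/pkg/rpc/manager"
)

// announceSeedPeer registers/updates this host as a seed peer in the manager.
// mc is whatever manager.ManagerClient the daemon already holds.
func announceSeedPeer(ctx context.Context, mc manager.ManagerClient) (*manager.SeedPeer, error) {
	return mc.UpdateSeedPeer(ctx, &manager.UpdateSeedPeerRequest{
		HostName:          "seed-peer-1",
		Type:              "super", // must be one of "super", "strong", "weak" per the new validation rule
		Idc:               "idc-1",
		Ip:                "192.0.2.10",
		Port:              8002,
		DownloadPort:      8001,
		SeedPeerClusterId: 1,
	})
}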
@@ -38,26 +38,6 @@ func (m *MockManagerClient) EXPECT() *MockManagerClientMockRecorder {
  return m.recorder
}

// GetCDN mocks base method.
func (m *MockManagerClient) GetCDN(ctx context.Context, in *manager.GetCDNRequest, opts ...grpc.CallOption) (*manager.CDN, error) {
  m.ctrl.T.Helper()
  varargs := []interface{}{ctx, in}
  for _, a := range opts {
    varargs = append(varargs, a)
  }
  ret := m.ctrl.Call(m, "GetCDN", varargs...)
  ret0, _ := ret[0].(*manager.CDN)
  ret1, _ := ret[1].(error)
  return ret0, ret1
}

// GetCDN indicates an expected call of GetCDN.
func (mr *MockManagerClientMockRecorder) GetCDN(ctx, in interface{}, opts ...interface{}) *gomock.Call {
  mr.mock.ctrl.T.Helper()
  varargs := append([]interface{}{ctx, in}, opts...)
  return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCDN", reflect.TypeOf((*MockManagerClient)(nil).GetCDN), varargs...)
}

// GetScheduler mocks base method.
func (m *MockManagerClient) GetScheduler(ctx context.Context, in *manager.GetSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
  m.ctrl.T.Helper()
@@ -138,26 +118,6 @@ func (mr *MockManagerClientMockRecorder) ListSchedulers(ctx, in interface{}, opt
  return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerClient)(nil).ListSchedulers), varargs...)
}

// UpdateCDN mocks base method.
func (m *MockManagerClient) UpdateCDN(ctx context.Context, in *manager.UpdateCDNRequest, opts ...grpc.CallOption) (*manager.CDN, error) {
  m.ctrl.T.Helper()
  varargs := []interface{}{ctx, in}
  for _, a := range opts {
    varargs = append(varargs, a)
  }
  ret := m.ctrl.Call(m, "UpdateCDN", varargs...)
  ret0, _ := ret[0].(*manager.CDN)
  ret1, _ := ret[1].(error)
  return ret0, ret1
}

// UpdateCDN indicates an expected call of UpdateCDN.
func (mr *MockManagerClientMockRecorder) UpdateCDN(ctx, in interface{}, opts ...interface{}) *gomock.Call {
  mr.mock.ctrl.T.Helper()
  varargs := append([]interface{}{ctx, in}, opts...)
  return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockManagerClient)(nil).UpdateCDN), varargs...)
}

// UpdateScheduler mocks base method.
func (m *MockManagerClient) UpdateScheduler(ctx context.Context, in *manager.UpdateSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
  m.ctrl.T.Helper()
@@ -358,21 +318,6 @@ func (m *MockManagerServer) EXPECT() *MockManagerServerMockRecorder {
  return m.recorder
}

// GetCDN mocks base method.
func (m *MockManagerServer) GetCDN(arg0 context.Context, arg1 *manager.GetCDNRequest) (*manager.CDN, error) {
  m.ctrl.T.Helper()
  ret := m.ctrl.Call(m, "GetCDN", arg0, arg1)
  ret0, _ := ret[0].(*manager.CDN)
  ret1, _ := ret[1].(error)
  return ret0, ret1
}

// GetCDN indicates an expected call of GetCDN.
func (mr *MockManagerServerMockRecorder) GetCDN(arg0, arg1 interface{}) *gomock.Call {
  mr.mock.ctrl.T.Helper()
  return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCDN", reflect.TypeOf((*MockManagerServer)(nil).GetCDN), arg0, arg1)
}

// GetScheduler mocks base method.
func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
  m.ctrl.T.Helper()
@@ -432,21 +377,6 @@ func (mr *MockManagerServerMockRecorder) ListSchedulers(arg0, arg1 interface{})
  return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerServer)(nil).ListSchedulers), arg0, arg1)
}

// UpdateCDN mocks base method.
func (m *MockManagerServer) UpdateCDN(arg0 context.Context, arg1 *manager.UpdateCDNRequest) (*manager.CDN, error) {
  m.ctrl.T.Helper()
  ret := m.ctrl.Call(m, "UpdateCDN", arg0, arg1)
  ret0, _ := ret[0].(*manager.CDN)
  ret1, _ := ret[1].(error)
  return ret0, ret1
}

// UpdateCDN indicates an expected call of UpdateCDN.
func (mr *MockManagerServerMockRecorder) UpdateCDN(arg0, arg1 interface{}) *gomock.Call {
  mr.mock.ctrl.T.Helper()
  return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockManagerServer)(nil).UpdateCDN), arg0, arg1)
}

// UpdateScheduler mocks base method.
func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
  m.ctrl.T.Helper()

@@ -369,8 +369,8 @@ type PeerHost struct {
  sizeCache     protoimpl.SizeCache
  unknownFields protoimpl.UnknownFields

  // each time the daemon starts, it will generate a different uuid
  Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
  // each time the daemon starts, it will generate a different id
  Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
  // peer host ip
  Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"`
  // rpc service port for peer

@@ -421,9 +421,9 @@ func (*PeerHost) Descriptor() ([]byte, []int) {
  return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{3}
}

func (x *PeerHost) GetUuid() string {
func (x *PeerHost) GetId() string {
  if x != nil {
    return x.Uuid
    return x.Id
  }
  return ""
}
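A minimal sketch (not part of the diff) of building the renamed PeerHost, mirroring the test fixtures in this change: the host carries an Id derived with idgen.HostID instead of a uuid. The idgen import path is inferred from its usage in the tests.

package main

import (
	"d7y.io/dragonfly/v2/pkg/idgen"
	"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
)

// buildPeerHost constructs the host descriptor a daemon reports to the scheduler.
func buildPeerHost(hostname string, rpcPort, downPort int32) *scheduler.PeerHost {
	return &scheduler.PeerHost{
		Id:       idgen.HostID(hostname, rpcPort),
		Ip:       "127.0.0.1",
		RpcPort:  rpcPort,
		DownPort: downPort,
		HostName: hostname,
	}
}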
@@ -1262,174 +1262,174 @@ var file_pkg_rpc_scheduler_scheduler_proto_rawDesc = []byte{
  // (several hundred lines of regenerated raw protobuf descriptor bytes are omitted here;
  // they only encode the PeerHost change from the uuid field to the id field)
}

var (

@@ -43,9 +43,6 @@ var (
  _ = base.Code(0)
)

// define the regex for a UUID once up-front
var _scheduler_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")

// Validate checks the field values on PeerTaskRequest with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
@@ -366,11 +363,10 @@ func (m *PeerHost) Validate() error {
    return nil
  }

  if err := m._validateUuid(m.GetUuid()); err != nil {
  if utf8.RuneCountInString(m.GetId()) < 1 {
    return PeerHostValidationError{
      field:  "Uuid",
      reason: "value must be a valid UUID",
      cause:  err,
      field:  "Id",
      reason: "value length must be at least 1 runes",
    }
  }

@@ -444,14 +440,6 @@ func (m *PeerHost) _validateHostname(host string) error {
  return nil
}

func (m *PeerHost) _validateUuid(uuid string) error {
  if matched := _scheduler_uuidPattern.MatchString(uuid); !matched {
    return errors.New("invalid uuid format")
  }

  return nil
}

// PeerHostValidationError is the validation error returned by
// PeerHost.Validate if the designated constraints aren't met.
type PeerHostValidationError struct {

@@ -74,8 +74,8 @@ message SinglePiece{
}

message PeerHost{
  // each time the daemon starts, it will generate a different uuid
  string uuid = 1 [(validate.rules).string.uuid = true];
  // each time the daemon starts, it will generate a different id
  string id = 1 [(validate.rules).string.min_len = 1];
  // peer host ip
  string ip = 2 [(validate.rules).string.ip = true];
  // rpc service port for peer

@@ -84,7 +84,7 @@ func New() *Config {
      },
    },
    DynConfig: &DynConfig{
      RefreshInterval: 1 * time.Minute,
      RefreshInterval: 10 * time.Second,
    },
    Host:    &HostConfig{},
    Manager: &ManagerConfig{

@@ -141,7 +141,7 @@ func TestConfig_New(t *testing.T) {
      },
    },
    DynConfig: &DynConfig{
      RefreshInterval: 1 * time.Minute,
      RefreshInterval: 10 * time.Second,
    },
    Host:    &HostConfig{},
    Manager: &ManagerConfig{

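The default dynconfig refresh interval drops from one minute to ten seconds. A hedged sketch (not part of the diff) of overriding that default from calling code; the scheduler config import path is inferred.

package main

import (
	"time"

	"d7y.io/dragonfly/v2/scheduler/config"
)

// newConfigWithSlowRefresh keeps the new 10s default unless explicitly overridden.
func newConfigWithSlowRefresh() *config.Config {
	cfg := config.New() // DynConfig.RefreshInterval now defaults to 10 * time.Second
	cfg.DynConfig.RefreshInterval = 30 * time.Second
	return cfg
}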
@@ -17,8 +17,8 @@
package config

const (
  // Default number of cdn load limit.
  DefaultCDNLoadLimit = 300
  // Default number of seed peer load limit.
  DefaultSeedPeerLoadLimit = 300

  // Default number of client load limit.
  DefaultClientLoadLimit = 50

@@ -41,7 +41,6 @@ var (

type DynconfigData struct {
  SeedPeers        []*SeedPeer       `yaml:"seedPeers" mapstructure:"seedPeers" json:"seed_peers"`
  CDNs             []*CDN            `yaml:"cdns" mapstructure:"cdns" json:"cdns"`
  SchedulerCluster *SchedulerCluster `yaml:"schedulerCluster" mapstructure:"schedulerCluster" json:"scheduler_cluster"`
}

@@ -49,6 +48,7 @@ type SeedPeer struct {
  ID          uint   `yaml:"id" mapstructure:"id" json:"id"`
  Hostname    string `yaml:"hostname" mapstructure:"hostname" json:"host_name"`
  Type        string `yaml:"type" mapstructure:"type" json:"type"`
  IsCDN       bool   `yaml:"isCDN" mapstructure:"isCDN" json:"is_cdn"`
  IDC         string `yaml:"idc" mapstructure:"idc" json:"idc"`
  NetTopology string `yaml:"netTopology" mapstructure:"netTopology" json:"net_topology"`
  Location    string `yaml:"location" mapstructure:"location" json:"location"`
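For orientation, a sketch (not part of the diff) of the dynamic data the scheduler consumes after this change: a seed peer list with an IsCDN compatibility flag instead of a separate CDN list. Field values and the scheduler config import path are illustrative assumptions.

package main

import "d7y.io/dragonfly/v2/scheduler/config"

// exampleDynconfigData builds the shape the scheduler dynconfig now returns.
func exampleDynconfigData() *config.DynconfigData {
	return &config.DynconfigData{
		SeedPeers: []*config.SeedPeer{
			{
				ID:           1,
				Hostname:     "seed-peer-1",
				Type:         "super",
				IsCDN:        true, // kept for compatibility with v2.0.3-beta.2 CDN hosts
				IDC:          "idc-1",
				IP:           "192.0.2.10",
				Port:         8002,
				DownloadPort: 8001,
			},
		},
	}
}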
@@ -75,34 +75,6 @@ type SeedPeerCluster struct {
  Config []byte `yaml:"config" mapstructure:"config" json:"config"`
}

type CDN struct {
  ID           uint        `yaml:"id" mapstructure:"id" json:"id"`
  Hostname     string      `yaml:"hostname" mapstructure:"hostname" json:"host_name"`
  IDC          string      `yaml:"idc" mapstructure:"idc" json:"idc"`
  Location     string      `yaml:"location" mapstructure:"location" json:"location"`
  IP           string      `yaml:"ip" mapstructure:"ip" json:"ip"`
  Port         int32       `yaml:"port" mapstructure:"port" json:"port"`
  DownloadPort int32       `yaml:"downloadPort" mapstructure:"downloadPort" json:"download_port"`
  CDNCluster   *CDNCluster `yaml:"cdnCluster" mapstructure:"cdnCluster" json:"cdn_cluster"`
}

func (c *CDN) GetCDNClusterConfig() (types.CDNClusterConfig, bool) {
  if c.CDNCluster == nil {
    return types.CDNClusterConfig{}, false
  }

  var config types.CDNClusterConfig
  if err := json.Unmarshal(c.CDNCluster.Config, &config); err != nil {
    return types.CDNClusterConfig{}, false
  }

  return config, true
}

type CDNCluster struct {
  Config []byte `yaml:"config" mapstructure:"config" json:"config"`
}

type SchedulerCluster struct {
  Config       []byte `yaml:"config" mapstructure:"config" json:"config"`
  ClientConfig []byte `yaml:"clientConfig" mapstructure:"clientConfig" json:"client_config"`

@@ -62,14 +62,6 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) {
      sleep: func() {},
      mock: func(m *mocks.MockClientMockRecorder) {
        m.GetScheduler(gomock.Any()).Return(&manager.Scheduler{
          Cdns: []*manager.CDN{
            {
              HostName:     "foo",
              Ip:           "127.0.0.1",
              Port:         8001,
              DownloadPort: 8003,
            },
          },
          SeedPeers: []*manager.SeedPeer{
            {
              HostName: "bar",

@@ -82,10 +74,6 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) {
      },
      expect: func(t *testing.T, data *DynconfigData, err error) {
        assert := assert.New(t)
        assert.Equal(data.CDNs[0].Hostname, "foo")
        assert.Equal(data.CDNs[0].IP, "127.0.0.1")
        assert.Equal(data.CDNs[0].Port, int32(8001))
        assert.Equal(data.CDNs[0].DownloadPort, int32(8003))
        assert.Equal(data.SeedPeers[0].Hostname, "bar")
        assert.Equal(data.SeedPeers[0].IP, "127.0.0.1")
        assert.Equal(data.SeedPeers[0].Port, int32(8001))

@@ -106,14 +94,6 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) {
      mock: func(m *mocks.MockClientMockRecorder) {
        gomock.InOrder(
          m.GetScheduler(gomock.Any()).Return(&manager.Scheduler{
            Cdns: []*manager.CDN{
              {
                HostName:     "foo",
                Ip:           "127.0.0.1",
                Port:         8001,
                DownloadPort: 8003,
              },
            },
            SeedPeers: []*manager.SeedPeer{
              {
                HostName: "bar",

@@ -128,10 +108,6 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) {
      },
      expect: func(t *testing.T, data *DynconfigData, err error) {
        assert := assert.New(t)
        assert.Equal(data.CDNs[0].Hostname, "foo")
        assert.Equal(data.CDNs[0].IP, "127.0.0.1")
        assert.Equal(data.CDNs[0].Port, int32(8001))
        assert.Equal(data.CDNs[0].DownloadPort, int32(8003))
        assert.Equal(data.SeedPeers[0].Hostname, "bar")
        assert.Equal(data.SeedPeers[0].IP, "127.0.0.1")
        assert.Equal(data.SeedPeers[0].Port, int32(8001))

@@ -133,7 +133,7 @@ var (
    Subsystem: constants.SchedulerMetricsName,
    Name:      "peer_host_traffic",
    Help:      "Counter of the number of per peer host traffic.",
  }, []string{"biz_tag", "traffic_type", "peer_host_uuid", "peer_host_ip"})
  }, []string{"biz_tag", "traffic_type", "peer_host_id", "peer_host_ip"})

  PeerTaskCounter = promauto.NewCounterVec(prometheus.CounterOpts{
    Namespace: constants.MetricsNamespace,

@@ -54,14 +54,6 @@ func WithUploadLoadLimit(limit int32) HostOption {
  }
}

// WithIsCDN sets host's IsCDN.
func WithIsCDN(isCDN bool) HostOption {
  return func(h *Host) *Host {
    h.IsCDN = isCDN
    return h
  }
}

// WithHostType sets host's type.
func WithHostType(hostType HostType) HostOption {
  return func(h *Host) *Host {

@@ -115,9 +107,6 @@ type Host struct {
  // PeerCount is peer count.
  PeerCount *atomic.Int32

  // IsCDN is used as tag cdn.
  IsCDN bool

  // CreateAt is host create time.
  CreateAt *atomic.Time

@@ -131,7 +120,7 @@ type Host struct {
// New host instance.
func NewHost(rawHost *scheduler.PeerHost, options ...HostOption) *Host {
  h := &Host{
    ID:       rawHost.Uuid,
    ID:       rawHost.Id,
    Type:     HostTypeNormal,
    IP:       rawHost.Ip,
    Hostname: rawHost.HostName,

@@ -145,10 +134,9 @@ func NewHost(rawHost *scheduler.PeerHost, options ...HostOption) *Host {
    UploadPeerCount: atomic.NewInt32(0),
    Peers:           &sync.Map{},
    PeerCount:       atomic.NewInt32(0),
    IsCDN:           false,
    CreateAt:        atomic.NewTime(time.Now()),
    UpdateAt:        atomic.NewTime(time.Now()),
    Log:             logger.WithHostID(rawHost.Uuid),
    Log:             logger.WithHostID(rawHost.Id),
  }

  for _, opt := range options {

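A sketch (not part of the diff) of constructing a seed host after this change: the WithIsCDN option is gone, so a seed host is expressed purely through its host type, mirroring the updated tests. The resource package import path is inferred.

package main

import (
	"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
	"d7y.io/dragonfly/v2/scheduler/resource"
)

// newSeedHost wraps a PeerHost as a super-seed host with a seed-level upload limit.
func newSeedHost(rawHost *scheduler.PeerHost) *resource.Host {
	return resource.NewHost(
		rawHost,
		resource.WithHostType(resource.HostTypeSuperSeed),
		resource.WithUploadLoadLimit(300), // matches DefaultSeedPeerLoadLimit above
	)
}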
@@ -28,7 +28,7 @@ import (

var (
  mockRawHost = &scheduler.PeerHost{
    Uuid:           idgen.HostID("hostname", 8003),
    Id:             idgen.HostID("hostname", 8003),
    Ip:             "127.0.0.1",
    RpcPort:        8003,
    DownPort:       8001,

@@ -40,23 +40,11 @@ var (
  }

  mockRawSeedHost = &scheduler.PeerHost{
    Uuid:           idgen.SeedHostID("hostname", 8003),
    Id:             idgen.HostID("hostname_seed", 8003),
    Ip:             "127.0.0.1",
    RpcPort:        8003,
    DownPort:       8001,
    HostName:       "hostname",
    SecurityDomain: "security_domain",
    Location:       "location",
    Idc:            "idc",
    NetTopology:    "net_topology",
  }

  mockRawCDNHost = &scheduler.PeerHost{
    Uuid:           idgen.CDNHostID("hostname", 8003),
    Ip:             "127.0.0.1",
    RpcPort:        8003,
    DownPort:       8001,
    HostName:       "hostname",
    HostName:       "hostname_seed",
    SecurityDomain: "security_domain",
    Location:       "location",
    Idc:            "idc",

@@ -76,7 +64,7 @@ func TestHost_NewHost(t *testing.T) {
      rawHost: mockRawHost,
      expect: func(t *testing.T, host *Host) {
        assert := assert.New(t)
        assert.Equal(host.ID, mockRawHost.Uuid)
        assert.Equal(host.ID, mockRawHost.Id)
        assert.Equal(host.Type, HostTypeNormal)
        assert.Equal(host.IP, mockRawHost.Ip)
        assert.Equal(host.Port, mockRawHost.RpcPort)

@@ -88,7 +76,6 @@ func TestHost_NewHost(t *testing.T) {
        assert.Equal(host.NetTopology, mockRawHost.NetTopology)
        assert.Equal(host.UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
        assert.Equal(host.PeerCount.Load(), int32(0))
        assert.Equal(host.IsCDN, false)
        assert.NotEqual(host.CreateAt.Load(), 0)
        assert.NotEqual(host.UpdateAt.Load(), 0)
        assert.NotNil(host.Log)

@@ -97,10 +84,10 @@ func TestHost_NewHost(t *testing.T) {
    {
      name:    "new seed host",
      rawHost: mockRawSeedHost,
      options: []HostOption{WithHostType(HostTypeSuperSeed), WithIsCDN(true)},
      options: []HostOption{WithHostType(HostTypeSuperSeed)},
      expect: func(t *testing.T, host *Host) {
        assert := assert.New(t)
        assert.Equal(host.ID, mockRawSeedHost.Uuid)
        assert.Equal(host.ID, mockRawSeedHost.Id)
        assert.Equal(host.Type, HostTypeSuperSeed)
        assert.Equal(host.IP, mockRawSeedHost.Ip)
        assert.Equal(host.Port, mockRawSeedHost.RpcPort)

@@ -112,7 +99,6 @@ func TestHost_NewHost(t *testing.T) {
        assert.Equal(host.NetTopology, mockRawSeedHost.NetTopology)
        assert.Equal(host.UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
        assert.Equal(host.PeerCount.Load(), int32(0))
        assert.Equal(host.IsCDN, true)
        assert.NotEqual(host.CreateAt.Load(), 0)
        assert.NotEqual(host.UpdateAt.Load(), 0)
        assert.NotNil(host.Log)

@@ -124,7 +110,7 @@ func TestHost_NewHost(t *testing.T) {
      options: []HostOption{WithUploadLoadLimit(200)},
      expect: func(t *testing.T, host *Host) {
        assert := assert.New(t)
        assert.Equal(host.ID, mockRawHost.Uuid)
        assert.Equal(host.ID, mockRawHost.Id)
        assert.Equal(host.Type, HostTypeNormal)
        assert.Equal(host.IP, mockRawHost.Ip)
        assert.Equal(host.Port, mockRawHost.RpcPort)

@@ -136,7 +122,6 @@ func TestHost_NewHost(t *testing.T) {
        assert.Equal(host.NetTopology, mockRawHost.NetTopology)
        assert.Equal(host.UploadLoadLimit.Load(), int32(200))
        assert.Equal(host.PeerCount.Load(), int32(0))
        assert.Equal(host.IsCDN, false)
        assert.NotEqual(host.CreateAt.Load(), 0)
        assert.NotEqual(host.UpdateAt.Load(), 0)
        assert.NotNil(host.Log)

@@ -118,11 +118,12 @@ func TestResource_New(t *testing.T) {
          dynconfig.Get().Return(&config.DynconfigData{
            SeedPeers: []*config.SeedPeer{},
          }, nil).Times(1),
          dynconfig.Register(gomock.Any()).Return().Times(1),
        )
      },
      expect: func(t *testing.T, resource Resource, err error) {
        assert := assert.New(t)
        assert.EqualError(err, "address list of cdn is empty")
        assert.NoError(err)
      },
    },
    {

@@ -137,10 +137,10 @@ func (s *seedPeer) initSeedPeer(task *Task, ps *cdnsystem.PieceSeed) (*Peer, err
  task.Log.Infof("can not find seed peer: %s", ps.PeerId)

  // Load host from manager.
  host, ok := s.hostManager.Load(ps.HostUuid)
  host, ok := s.hostManager.Load(ps.HostId)
  if !ok {
    task.Log.Errorf("can not find seed host uuid: %s", ps.HostUuid)
    return nil, errors.Errorf("can not find host uuid: %s", ps.HostUuid)
    task.Log.Errorf("can not find seed host id: %s", ps.HostId)
    return nil, errors.Errorf("can not find host id: %s", ps.HostId)
  }

  // New seed peer.

@@ -60,22 +60,13 @@ func newSeedPeerClient(dynconfig config.DynconfigInterface, hostManager HostMana
  }

  // Initialize seed peer grpc client.
  netAddrs := append(seedPeersToNetAddrs(config.SeedPeers), cdnsToNetAddrs(config.CDNs)...)
  client, err := client.GetClientByAddr(netAddrs, opts...)
  if err != nil {
    return nil, err
  }
  client := client.GetClientByAddr(seedPeersToNetAddrs(config.SeedPeers), opts...)

  // Initialize seed hosts.
  for _, host := range seedPeersToHosts(config.SeedPeers) {
    hostManager.Store(host)
  }

  // Initialize cdn hosts.
  for _, host := range cdnsToHosts(config.CDNs) {
    hostManager.Store(host)
  }

  dc := &seedPeerClient{
    hostManager: hostManager,
    CdnClient:   client,
@@ -93,13 +84,8 @@ func (sc *seedPeerClient) OnNotify(data *config.DynconfigData) {
    seedPeers = append(seedPeers, *seedPeer)
  }

  var cdns []config.CDN
  for _, cdn := range data.CDNs {
    cdns = append(cdns, *cdn)
  }

  if reflect.DeepEqual(sc.data, data) {
    logger.Infof("addresses deep equal: %#v %#v", seedPeers, cdns)
    logger.Infof("addresses deep equal: %#v", seedPeers)
    return
  }

@@ -107,7 +93,17 @@ func (sc *seedPeerClient) OnNotify(data *config.DynconfigData) {
	// the seed peer needs to be cleared.
	diffSeedPeers := diffSeedPeers(sc.data.SeedPeers, data.SeedPeers)
	for _, seedPeer := range diffSeedPeers {
		id := idgen.SeedHostID(seedPeer.Hostname, seedPeer.Port)
		if seedPeer.IsCDN {
			id := idgen.CDNHostID(seedPeer.Hostname, seedPeer.Port)
			if host, ok := sc.hostManager.Load(id); ok {
				host.LeavePeers()
				sc.hostManager.Delete(id)
			}

			continue
		}

		id := idgen.HostID(seedPeer.Hostname, seedPeer.Port)
		if host, ok := sc.hostManager.Load(id); ok {
			host.LeavePeers()
			sc.hostManager.Delete(id)
@@ -119,29 +115,12 @@ func (sc *seedPeerClient) OnNotify(data *config.DynconfigData) {
		sc.hostManager.Store(host)
	}

	// If only the ip of the cdn host is changed,
	// the cdn peer needs to be cleared.
	diffCDNs := diffCDNs(sc.data.CDNs, data.CDNs)
	for _, cdn := range diffCDNs {
		id := idgen.CDNHostID(cdn.Hostname, cdn.Port)
		if host, ok := sc.hostManager.Load(id); ok {
			host.LeavePeers()
			sc.hostManager.Delete(id)
		}
	}

	// Update cdn in host manager.
	for _, host := range cdnsToHosts(data.CDNs) {
		sc.hostManager.Store(host)
	}

	// Update dynamic data.
	sc.data = data

	// Update grpc seed peer addresses.
	netAddrs := append(seedPeersToNetAddrs(data.SeedPeers), cdnsToNetAddrs(data.CDNs)...)
	sc.UpdateState(netAddrs)
	logger.Infof("addresses have been updated: %#v %#v", seedPeers, cdns)
	sc.UpdateState(seedPeersToNetAddrs(data.SeedPeers))
	logger.Infof("addresses have been updated: %#v", seedPeers)
}

// seedPeersToHosts coverts []*config.SeedPeer to map[string]*Host.
@@ -153,9 +132,25 @@ func seedPeersToHosts(seedPeers []*config.SeedPeer) map[string]*Host {
			options = append(options, WithUploadLoadLimit(int32(config.LoadLimit)))
		}

		id := idgen.SeedHostID(seedPeer.Hostname, seedPeer.Port)
		if seedPeer.IsCDN {
			id := idgen.CDNHostID(seedPeer.Hostname, seedPeer.Port)
			hosts[id] = NewHost(&rpcscheduler.PeerHost{
				Uuid:        id,
				Id:          id,
				Ip:          seedPeer.IP,
				RpcPort:     seedPeer.Port,
				DownPort:    seedPeer.DownloadPort,
				HostName:    seedPeer.Hostname,
				Idc:         seedPeer.IDC,
				Location:    seedPeer.Location,
				NetTopology: seedPeer.NetTopology,
			}, options...)

			continue
		}

		id := idgen.HostID(seedPeer.Hostname, seedPeer.Port)
		hosts[id] = NewHost(&rpcscheduler.PeerHost{
			Id:       id,
			Ip:       seedPeer.IP,
			RpcPort:  seedPeer.Port,
			DownPort: seedPeer.DownloadPort,
@@ -222,89 +217,19 @@ func diffSeedPeers(sx []*config.SeedPeer, sy []*config.SeedPeer) []*config.SeedP
	for _, x := range sx {
		found := false
		for _, y := range sy {
			if idgen.SeedHostID(x.Hostname, x.Port) == idgen.SeedHostID(y.Hostname, y.Port) {
				found = true
				break
			}
		}

		if !found {
			diff = append(diff, x)
		}
	}

	return diff
}

// cdnsToHosts coverts []*config.CDN to map[string]*Host.
func cdnsToHosts(cdns []*config.CDN) map[string]*Host {
	hosts := map[string]*Host{}
	for _, cdn := range cdns {
		var netTopology string
		options := []HostOption{WithHostType(HostTypeSuperSeed), WithIsCDN(true)}
		if config, ok := cdn.GetCDNClusterConfig(); ok && config.LoadLimit > 0 {
			options = append(options, WithUploadLoadLimit(int32(config.LoadLimit)))
			netTopology = config.NetTopology
		}

		id := idgen.CDNHostID(cdn.Hostname, cdn.Port)
		hosts[id] = NewHost(&rpcscheduler.PeerHost{
			Uuid:        id,
			Ip:          cdn.IP,
			RpcPort:     cdn.Port,
			DownPort:    cdn.DownloadPort,
			HostName:    cdn.Hostname,
			Idc:         cdn.IDC,
			Location:    cdn.Location,
			NetTopology: netTopology,
		}, options...)
	}
	return hosts
}

// cdnsToNetAddrs coverts []*config.CDN to []dfnet.NetAddr.
func cdnsToNetAddrs(cdns []*config.CDN) []dfnet.NetAddr {
	netAddrs := make([]dfnet.NetAddr, 0, len(cdns))
	for _, cdn := range cdns {
		netAddrs = append(netAddrs, dfnet.NetAddr{
			Type: dfnet.TCP,
			Addr: fmt.Sprintf("%s:%d", cdn.IP, cdn.Port),
		})
	}

	return netAddrs
}

// diffCDNs find out different cdns.
func diffCDNs(cx []*config.CDN, cy []*config.CDN) []*config.CDN {
	// Get cdns with the same HostID but different IP.
	var diff []*config.CDN
	for _, x := range cx {
		for _, y := range cy {
			if x.Hostname != y.Hostname {
				continue
			}

			if x.Port != y.Port {
				continue
			}

			if x.IP == y.IP {
				continue
			}

			diff = append(diff, x)
		}
	}

	// Get the removed cdns.
	for _, x := range cx {
		found := false
		for _, y := range cy {
			if x.IsCDN {
				if idgen.CDNHostID(x.Hostname, x.Port) == idgen.CDNHostID(y.Hostname, y.Port) {
					found = true
					break
				}

				continue
			}

			if idgen.HostID(x.Hostname, x.Port) == idgen.HostID(y.Hostname, y.Port) {
				found = true
				break
			}
		}

		if !found {
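Note: the new grpc client above is built from seedPeersToNetAddrs(config.SeedPeers) alone, and that helper's body is not shown in this diff. A minimal sketch, assuming it mirrors the removed cdnsToNetAddrs and that config.SeedPeer exposes IP and Port as in the tests in this commit:

// Sketch only: assumed seed peer analogue of the removed cdnsToNetAddrs; not part of this commit.
func seedPeersToNetAddrs(seedPeers []*config.SeedPeer) []dfnet.NetAddr {
	netAddrs := make([]dfnet.NetAddr, 0, len(seedPeers))
	for _, seedPeer := range seedPeers {
		netAddrs = append(netAddrs, dfnet.NetAddr{
			Type: dfnet.TCP,
			Addr: fmt.Sprintf("%s:%d", seedPeer.IP, seedPeer.Port),
		})
	}

	return netAddrs
}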
@ -54,22 +54,6 @@ func TestSeedPeerClient_newSeedPeerClient(t *testing.T) {
|
|||
assert.NoError(err)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new seed peer client with cdn",
|
||||
mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
|
||||
gomock.InOrder(
|
||||
dynconfig.Get().Return(&config.DynconfigData{
|
||||
CDNs: []*config.CDN{{ID: 1}},
|
||||
}, nil).Times(1),
|
||||
hostManager.Store(gomock.Any()).Return().Times(1),
|
||||
dynconfig.Register(gomock.Any()).Return().Times(1),
|
||||
)
|
||||
},
|
||||
expect: func(t *testing.T, err error) {
|
||||
assert := assert.New(t)
|
||||
assert.NoError(err)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new seed peer client failed because of dynconfig get error data",
|
||||
mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
|
||||
|
|
@ -87,11 +71,12 @@ func TestSeedPeerClient_newSeedPeerClient(t *testing.T) {
|
|||
dynconfig.Get().Return(&config.DynconfigData{
|
||||
SeedPeers: []*config.SeedPeer{},
|
||||
}, nil).Times(1),
|
||||
dynconfig.Register(gomock.Any()).Return().Times(1),
|
||||
)
|
||||
},
|
||||
expect: func(t *testing.T, err error) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualError(err, "address list of cdn is empty")
|
||||
assert.NoError(err)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@ -125,12 +110,6 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
|
|||
IP: "0.0.0.0",
|
||||
Port: 8080,
|
||||
}},
|
||||
CDNs: []*config.CDN{{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "0.0.0.0",
|
||||
Port: 8080,
|
||||
}},
|
||||
},
|
||||
mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
|
||||
gomock.InOrder(
|
||||
|
|
@ -141,14 +120,8 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
|
|||
IP: "0.0.0.0",
|
||||
Port: 8080,
|
||||
}},
|
||||
CDNs: []*config.CDN{{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "0.0.0.0",
|
||||
Port: 8080,
|
||||
}},
|
||||
}, nil).Times(1),
|
||||
hostManager.Store(gomock.Any()).Return().Times(2),
|
||||
hostManager.Store(gomock.Any()).Return().Times(1),
|
||||
dynconfig.Register(gomock.Any()).Return().Times(1),
|
||||
)
|
||||
},
|
||||
|
|
@ -161,11 +134,6 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
|
|||
Hostname: "foo",
|
||||
IP: "0.0.0.0",
|
||||
}},
|
||||
CDNs: []*config.CDN{{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "0.0.0.0",
|
||||
}},
|
||||
},
|
||||
mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
|
||||
mockHost := NewHost(mockRawHost)
|
||||
|
|
@ -176,19 +144,11 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
|
|||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
}},
|
||||
CDNs: []*config.CDN{{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
}},
|
||||
}, nil).Times(1),
|
||||
hostManager.Store(gomock.Any()).Return().Times(2),
|
||||
hostManager.Store(gomock.Any()).Return().Times(1),
|
||||
dynconfig.Register(gomock.Any()).Return().Times(1),
|
||||
hostManager.Load(gomock.Any()).Return(mockHost, true).Times(1),
|
||||
hostManager.Delete(gomock.Eq("foo-0_Seed")).Return().Times(1),
|
||||
hostManager.Store(gomock.Any()).Return().Times(1),
|
||||
hostManager.Load(gomock.Any()).Return(mockHost, true).Times(1),
|
||||
hostManager.Delete(gomock.Eq("foo-0_CDN")).Return().Times(1),
|
||||
hostManager.Delete(gomock.Eq("foo-0")).Return().Times(1),
|
||||
hostManager.Store(gomock.Any()).Return().Times(1),
|
||||
)
|
||||
},
|
||||
|
|
@ -201,11 +161,6 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
|
|||
Hostname: "foo",
|
||||
IP: "0.0.0.0",
|
||||
}},
|
||||
CDNs: []*config.CDN{{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "0.0.0.0",
|
||||
}},
|
||||
},
|
||||
mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
|
||||
gomock.InOrder(
|
||||
|
|
@ -215,16 +170,9 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
|
|||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
}},
|
||||
CDNs: []*config.CDN{{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
}},
|
||||
}, nil).Times(1),
|
||||
hostManager.Store(gomock.Any()).Return().Times(2),
|
||||
dynconfig.Register(gomock.Any()).Return().Times(1),
|
||||
hostManager.Load(gomock.Any()).Return(nil, false).Times(1),
|
||||
hostManager.Store(gomock.Any()).Return().Times(1),
|
||||
dynconfig.Register(gomock.Any()).Return().Times(1),
|
||||
hostManager.Load(gomock.Any()).Return(nil, false).Times(1),
|
||||
hostManager.Store(gomock.Any()).Return().Times(1),
|
||||
)
|
||||
|
|
@ -237,10 +185,6 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
|
|||
ID: 1,
|
||||
IP: "127.0.0.1",
|
||||
}},
|
||||
CDNs: []*config.CDN{{
|
||||
ID: 1,
|
||||
IP: "127.0.0.1",
|
||||
}},
|
||||
},
|
||||
mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
|
||||
gomock.InOrder(
|
||||
|
|
@ -249,12 +193,8 @@ func TestSeedPeerClient_OnNotify(t *testing.T) {
|
|||
ID: 1,
|
||||
IP: "127.0.0.1",
|
||||
}},
|
||||
CDNs: []*config.CDN{{
|
||||
ID: 1,
|
||||
IP: "127.0.0.1",
|
||||
}},
|
||||
}, nil).Times(1),
|
||||
hostManager.Store(gomock.Any()).Return().Times(2),
|
||||
hostManager.Store(gomock.Any()).Return().Times(1),
|
||||
dynconfig.Register(gomock.Any()).Return().Times(1),
|
||||
)
|
||||
},
|
||||
|
|
@ -311,21 +251,20 @@ func TestSeedPeerClient_seedPeersToHosts(t *testing.T) {
|
|||
},
|
||||
expect: func(t *testing.T, hosts map[string]*Host) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].ID, mockRawSeedHost.Uuid)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].Type, HostTypeSuperSeed)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].IP, mockRawSeedHost.Ip)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].Hostname, mockRawSeedHost.HostName)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].Port, mockRawSeedHost.RpcPort)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].DownloadPort, mockRawSeedHost.DownPort)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].IDC, mockRawSeedHost.Idc)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].NetTopology, mockRawSeedHost.NetTopology)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].Location, mockRawSeedHost.Location)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].UploadLoadLimit.Load(), int32(10))
|
||||
assert.Empty(hosts[mockRawSeedHost.Uuid].Peers)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].IsCDN, false)
|
||||
assert.NotEqual(hosts[mockRawSeedHost.Uuid].CreateAt.Load(), 0)
|
||||
assert.NotEqual(hosts[mockRawSeedHost.Uuid].UpdateAt.Load(), 0)
|
||||
assert.NotNil(hosts[mockRawSeedHost.Uuid].Log)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].ID, mockRawSeedHost.Id)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].Type, HostTypeSuperSeed)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].IP, mockRawSeedHost.Ip)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].Hostname, mockRawSeedHost.HostName)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].Port, mockRawSeedHost.RpcPort)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].DownloadPort, mockRawSeedHost.DownPort)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].IDC, mockRawSeedHost.Idc)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].NetTopology, mockRawSeedHost.NetTopology)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].Location, mockRawSeedHost.Location)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].UploadLoadLimit.Load(), int32(10))
|
||||
assert.Empty(hosts[mockRawSeedHost.Id].Peers)
|
||||
assert.NotEqual(hosts[mockRawSeedHost.Id].CreateAt.Load(), 0)
|
||||
assert.NotEqual(hosts[mockRawSeedHost.Id].UpdateAt.Load(), 0)
|
||||
assert.NotNil(hosts[mockRawSeedHost.Id].Log)
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -345,21 +284,20 @@ func TestSeedPeerClient_seedPeersToHosts(t *testing.T) {
|
|||
},
|
||||
expect: func(t *testing.T, hosts map[string]*Host) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].ID, mockRawSeedHost.Uuid)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].Type, HostTypeSuperSeed)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].IP, mockRawSeedHost.Ip)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].Hostname, mockRawSeedHost.HostName)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].Port, mockRawSeedHost.RpcPort)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].DownloadPort, mockRawSeedHost.DownPort)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].IDC, mockRawSeedHost.Idc)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].NetTopology, mockRawSeedHost.NetTopology)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].Location, mockRawSeedHost.Location)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
|
||||
assert.Empty(hosts[mockRawSeedHost.Uuid].Peers)
|
||||
assert.Equal(hosts[mockRawSeedHost.Uuid].IsCDN, false)
|
||||
assert.NotEqual(hosts[mockRawSeedHost.Uuid].CreateAt.Load(), 0)
|
||||
assert.NotEqual(hosts[mockRawSeedHost.Uuid].UpdateAt.Load(), 0)
|
||||
assert.NotNil(hosts[mockRawSeedHost.Uuid].Log)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].ID, mockRawSeedHost.Id)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].Type, HostTypeSuperSeed)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].IP, mockRawSeedHost.Ip)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].Hostname, mockRawSeedHost.HostName)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].Port, mockRawSeedHost.RpcPort)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].DownloadPort, mockRawSeedHost.DownPort)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].IDC, mockRawSeedHost.Idc)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].NetTopology, mockRawSeedHost.NetTopology)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].Location, mockRawSeedHost.Location)
|
||||
assert.Equal(hosts[mockRawSeedHost.Id].UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
|
||||
assert.Empty(hosts[mockRawSeedHost.Id].Peers)
|
||||
assert.NotEqual(hosts[mockRawSeedHost.Id].CreateAt.Load(), 0)
|
||||
assert.NotEqual(hosts[mockRawSeedHost.Id].UpdateAt.Load(), 0)
|
||||
assert.NotNil(hosts[mockRawSeedHost.Id].Log)
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -623,344 +561,3 @@ func TestSeedPeerClient_diffSeedPeers(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeedPeerClient_cdnsToHosts(t *testing.T) {
|
||||
mockCDNClusterConfig, err := json.Marshal(&types.CDNClusterConfig{
|
||||
LoadLimit: 10,
|
||||
NetTopology: "foo",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
cdns []*config.CDN
|
||||
expect func(t *testing.T, hosts map[string]*Host)
|
||||
}{
|
||||
{
|
||||
name: "cdns covert to hosts",
|
||||
cdns: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: mockRawCDNHost.HostName,
|
||||
IP: mockRawCDNHost.Ip,
|
||||
Port: mockRawCDNHost.RpcPort,
|
||||
DownloadPort: mockRawCDNHost.DownPort,
|
||||
Location: mockRawCDNHost.Location,
|
||||
IDC: mockRawCDNHost.Idc,
|
||||
CDNCluster: &config.CDNCluster{
|
||||
Config: mockCDNClusterConfig,
|
||||
},
|
||||
},
|
||||
},
|
||||
expect: func(t *testing.T, hosts map[string]*Host) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].ID, mockRawCDNHost.Uuid)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].Type, HostTypeSuperSeed)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].IP, mockRawCDNHost.Ip)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].Hostname, mockRawCDNHost.HostName)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].Port, mockRawCDNHost.RpcPort)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].DownloadPort, mockRawCDNHost.DownPort)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].IDC, mockRawCDNHost.Idc)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].NetTopology, "foo")
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].Location, mockRawCDNHost.Location)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].UploadLoadLimit.Load(), int32(10))
|
||||
assert.Empty(hosts[mockRawCDNHost.Uuid].Peers)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].IsCDN, true)
|
||||
assert.NotEqual(hosts[mockRawCDNHost.Uuid].CreateAt.Load(), 0)
|
||||
assert.NotEqual(hosts[mockRawCDNHost.Uuid].UpdateAt.Load(), 0)
|
||||
assert.NotNil(hosts[mockRawCDNHost.Uuid].Log)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cdns covert to hosts without cluster config",
|
||||
cdns: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: mockRawCDNHost.HostName,
|
||||
IP: mockRawCDNHost.Ip,
|
||||
Port: mockRawCDNHost.RpcPort,
|
||||
DownloadPort: mockRawCDNHost.DownPort,
|
||||
Location: mockRawCDNHost.Location,
|
||||
IDC: mockRawCDNHost.Idc,
|
||||
},
|
||||
},
|
||||
expect: func(t *testing.T, hosts map[string]*Host) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].ID, mockRawCDNHost.Uuid)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].Type, HostTypeSuperSeed)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].IP, mockRawCDNHost.Ip)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].Hostname, mockRawCDNHost.HostName)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].Port, mockRawCDNHost.RpcPort)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].DownloadPort, mockRawCDNHost.DownPort)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].IDC, mockRawCDNHost.Idc)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].NetTopology, "")
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].Location, mockRawCDNHost.Location)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].UploadLoadLimit.Load(), int32(config.DefaultClientLoadLimit))
|
||||
assert.Empty(hosts[mockRawCDNHost.Uuid].Peers)
|
||||
assert.Equal(hosts[mockRawCDNHost.Uuid].IsCDN, true)
|
||||
assert.NotEqual(hosts[mockRawCDNHost.Uuid].CreateAt.Load(), 0)
|
||||
assert.NotEqual(hosts[mockRawCDNHost.Uuid].UpdateAt.Load(), 0)
|
||||
assert.NotNil(hosts[mockRawCDNHost.Uuid].Log)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cdns is empty",
|
||||
cdns: []*config.CDN{},
|
||||
expect: func(t *testing.T, hosts map[string]*Host) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(len(hosts), 0)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tc.expect(t, cdnsToHosts(tc.cdns))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeedPeerClient_cdnsToNetAddrs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cdns []*config.CDN
|
||||
expect func(t *testing.T, netAddrs []dfnet.NetAddr)
|
||||
}{
|
||||
{
|
||||
name: "cdns covert to netAddr",
|
||||
cdns: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: mockRawCDNHost.HostName,
|
||||
IP: mockRawCDNHost.Ip,
|
||||
Port: mockRawCDNHost.RpcPort,
|
||||
DownloadPort: mockRawCDNHost.DownPort,
|
||||
Location: mockRawCDNHost.Location,
|
||||
IDC: mockRawCDNHost.Idc,
|
||||
},
|
||||
},
|
||||
expect: func(t *testing.T, netAddrs []dfnet.NetAddr) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(netAddrs[0].Type, dfnet.TCP)
|
||||
assert.Equal(netAddrs[0].Addr, fmt.Sprintf("%s:%d", mockRawCDNHost.Ip, mockRawCDNHost.RpcPort))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cdns is empty",
|
||||
cdns: []*config.CDN{},
|
||||
expect: func(t *testing.T, netAddrs []dfnet.NetAddr) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(len(netAddrs), 0)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tc.expect(t, cdnsToNetAddrs(tc.cdns))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeedPeerClient_diffCDNs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cx []*config.CDN
|
||||
cy []*config.CDN
|
||||
expect func(t *testing.T, diff []*config.CDN)
|
||||
}{
|
||||
{
|
||||
name: "same cdn list",
|
||||
cx: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
cy: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
expect: func(t *testing.T, diff []*config.CDN) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualValues(diff, []*config.CDN(nil))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "different hostname",
|
||||
cx: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "bar",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
cy: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
expect: func(t *testing.T, diff []*config.CDN) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualValues(diff, []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "bar",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
})
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "different port",
|
||||
cx: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8081,
|
||||
},
|
||||
},
|
||||
cy: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
expect: func(t *testing.T, diff []*config.CDN) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualValues(diff, []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8081,
|
||||
},
|
||||
})
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "different ip",
|
||||
cx: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "0.0.0.0",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
cy: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
expect: func(t *testing.T, diff []*config.CDN) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualValues(diff, []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "0.0.0.0",
|
||||
Port: 8080,
|
||||
},
|
||||
})
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "remove y cdn",
|
||||
cx: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Hostname: "bar",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
cy: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
expect: func(t *testing.T, diff []*config.CDN) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualValues(diff, []*config.CDN{
|
||||
{
|
||||
ID: 2,
|
||||
Hostname: "bar",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
})
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "remove x cdn",
|
||||
cx: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
cy: []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "baz",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Hostname: "bar",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
},
|
||||
expect: func(t *testing.T, diff []*config.CDN) {
|
||||
assert := assert.New(t)
|
||||
assert.EqualValues(diff, []*config.CDN{
|
||||
{
|
||||
ID: 1,
|
||||
Hostname: "foo",
|
||||
IP: "127.0.0.1",
|
||||
Port: 8080,
|
||||
},
|
||||
})
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tc.expect(t, diffCDNs(tc.cx, tc.cy))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -30,7 +30,7 @@ import (

var (
	mockRawHost = &scheduler.PeerHost{
		Uuid:     idgen.HostID("hostname", 8003),
		Id:       idgen.HostID("hostname", 8003),
		Ip:       "127.0.0.1",
		RpcPort:  8003,
		DownPort: 8001,

@@ -49,7 +49,7 @@ var (
		Algorithm: evaluator.DefaultAlgorithm,
	}
	mockRawHost = &rpcscheduler.PeerHost{
		Uuid:     idgen.HostID("hostname", 8003),
		Id:       idgen.HostID("hostname", 8003),
		Ip:       "127.0.0.1",
		RpcPort:  8003,
		DownPort: 8001,

@@ -61,7 +61,7 @@ var (
	}

	mockRawSeedHost = &rpcscheduler.PeerHost{
		Uuid:     idgen.SeedHostID("hostname", 8003),
		Id:       idgen.HostID("hostname_seed", 8003),
		Ip:       "127.0.0.1",
		RpcPort:  8003,
		DownPort: 8001,

@@ -532,7 +532,7 @@ func (s *Service) registerTask(ctx context.Context, req *rpcscheduler.PeerTaskRe

// registerHost creates a new host or reuses a previous host.
func (s *Service) registerHost(ctx context.Context, rawHost *rpcscheduler.PeerHost) *resource.Host {
	host, ok := s.resource.HostManager().Load(rawHost.Uuid)
	host, ok := s.resource.HostManager().Load(rawHost.Id)
	if !ok {
		// Get scheduler cluster client config by manager.
		var options []resource.HostOption
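For orientation, a minimal caller-side sketch of the changed lookup: registerHost now keys the host manager by the dfdaemon-reported Id instead of Uuid. The field values below only mirror mockRawHost from the tests in this commit and are illustrative, not part of the change itself.

// Sketch only: illustrative caller of registerHost after this change.
peerHost := &rpcscheduler.PeerHost{
	Id:       idgen.HostID("hostname", 8003),
	Ip:       "127.0.0.1",
	RpcPort:  8003,
	DownPort: 8001,
}
host := s.registerHost(ctx, peerHost) // loads an existing Host by peerHost.Id, or creates and stores a new one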
@@ -844,7 +844,6 @@ func (s *Service) createRecord(peer *resource.Peer, peerState int, req *rpcsched
		record.ParentLocation = parent.Host.Location
		record.ParentFreeUploadLoad = parent.Host.FreeUploadLoad()
		record.ParentHostType = int(parent.Host.Type)
		record.ParentIsCDN = parent.Host.IsCDN
		record.ParentCreateAt = parent.CreateAt.Load().UnixNano()
		record.ParentUpdateAt = parent.UpdateAt.Load().UnixNano()
	}

@@ -58,7 +58,7 @@ var (
	}

	mockRawHost = &rpcscheduler.PeerHost{
		Uuid:     idgen.HostID("hostname", 8003),
		Id:       idgen.HostID("hostname", 8003),
		Ip:       "127.0.0.1",
		RpcPort:  8003,
		DownPort: 8001,

@@ -70,7 +70,7 @@ var (
	}

	mockRawSeedHost = &rpcscheduler.PeerHost{
		Uuid:     idgen.SeedHostID("hostname", 8003),
		Id:       idgen.HostID("hostname_seed", 8003),
		Ip:       "127.0.0.1",
		RpcPort:  8003,
		DownPort: 8001,
@ -166,7 +166,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -198,7 +198,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -231,7 +231,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -265,7 +265,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -303,7 +303,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -339,7 +339,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -375,7 +375,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -411,7 +411,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -453,7 +453,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -494,7 +494,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -535,7 +535,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -576,7 +576,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -611,7 +611,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
|
|||
req: &rpcscheduler.PeerTaskRequest{
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
},
|
||||
mock: func(
|
||||
|
|
@ -1232,7 +1232,7 @@ func TestService_AnnounceTask(t *testing.T) {
|
|||
Cid: mockCID,
|
||||
UrlMeta: &base.UrlMeta{},
|
||||
PeerHost: &rpcscheduler.PeerHost{
|
||||
Uuid: mockRawHost.Uuid,
|
||||
Id: mockRawHost.Id,
|
||||
},
|
||||
PiecePacket: &base.PiecePacket{
|
||||
PieceInfos: []*base.PieceInfo{{PieceNum: 1}},
|
||||
|
|
@ -2028,12 +2028,12 @@ func TestService_registerHost(t *testing.T) {
|
|||
mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
|
||||
gomock.InOrder(
|
||||
mr.HostManager().Return(hostManager).Times(1),
|
||||
mh.Load(gomock.Eq(mockRawHost.Uuid)).Return(mockHost, true).Times(1),
|
||||
mh.Load(gomock.Eq(mockRawHost.Id)).Return(mockHost, true).Times(1),
|
||||
)
|
||||
},
|
||||
expect: func(t *testing.T, host *resource.Host) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(host.ID, mockRawHost.Uuid)
|
||||
assert.Equal(host.ID, mockRawHost.Id)
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -2046,7 +2046,7 @@ func TestService_registerHost(t *testing.T) {
|
|||
mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
|
||||
gomock.InOrder(
|
||||
mr.HostManager().Return(hostManager).Times(1),
|
||||
mh.Load(gomock.Eq(mockRawHost.Uuid)).Return(nil, false).Times(1),
|
||||
mh.Load(gomock.Eq(mockRawHost.Id)).Return(nil, false).Times(1),
|
||||
md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{LoadLimit: 10}, true).Times(1),
|
||||
mr.HostManager().Return(hostManager).Times(1),
|
||||
mh.Store(gomock.Any()).Return().Times(1),
|
||||
|
|
@ -2054,7 +2054,7 @@ func TestService_registerHost(t *testing.T) {
|
|||
},
|
||||
expect: func(t *testing.T, host *resource.Host) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(host.ID, mockRawHost.Uuid)
|
||||
assert.Equal(host.ID, mockRawHost.Id)
|
||||
assert.Equal(host.UploadLoadLimit.Load(), int32(10))
|
||||
},
|
||||
},
|
||||
|
|
@ -2068,7 +2068,7 @@ func TestService_registerHost(t *testing.T) {
|
|||
mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
|
||||
gomock.InOrder(
|
||||
mr.HostManager().Return(hostManager).Times(1),
|
||||
mh.Load(gomock.Eq(mockRawHost.Uuid)).Return(nil, false).Times(1),
|
||||
mh.Load(gomock.Eq(mockRawHost.Id)).Return(nil, false).Times(1),
|
||||
md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{}, false).Times(1),
|
||||
mr.HostManager().Return(hostManager).Times(1),
|
||||
mh.Store(gomock.Any()).Return().Times(1),
|
||||
|
|
@ -2076,7 +2076,7 @@ func TestService_registerHost(t *testing.T) {
|
|||
},
|
||||
expect: func(t *testing.T, host *resource.Host) {
|
||||
assert := assert.New(t)
|
||||
assert.Equal(host.ID, mockRawHost.Uuid)
|
||||
assert.Equal(host.ID, mockRawHost.Id)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -167,9 +167,6 @@ type Record struct {
	// ParentHostType is parent host type.
	ParentHostType int `csv:"parentHostType"`

	// ParentIsCDN is used as tag cdn.
	ParentIsCDN bool `csv:"parentIsCDN"`

	// ParentCreateAt is parent peer create nanosecond time.
	ParentCreateAt int64 `csv:"parentCreateAt"`
@@ -266,7 +266,6 @@ func TestStorage_List(t *testing.T) {
				ParentNetTopology:    "parent_net_topology",
				ParentLocation:       "parent_location",
				ParentFreeUploadLoad: 1,
				ParentIsCDN:          true,
				ParentCreateAt:       time.Now().UnixNano(),
				ParentUpdateAt:       time.Now().UnixNano(),
			},
@@ -30,7 +30,7 @@ const (
const (
	managerServerName   = "manager"
	schedulerServerName = "scheduler"
	cdnServerName       = "cdn"
	seedPeerServerName  = "seed-peer"
	dfdaemonServerName  = "dfdaemon"
	proxyServerName     = "proxy"
)

@@ -56,10 +56,10 @@ var servers = map[string]server{
		logDirName: schedulerServerName,
		replicas:   3,
	},
	cdnServerName: {
		name:       cdnServerName,
	seedPeerServerName: {
		name:       seedPeerServerName,
		namespace:  dragonflyNamespace,
		logDirName: cdnServerName,
		logDirName: "daemon",
		replicas:   3,
	},
	dfdaemonServerName: {
@@ -17,7 +17,7 @@
package manager

const (
	cdnCachePath     = "/tmp/cdn/download"
	seedPeerDataPath = "/var/lib/dragonfly"

	managerService = "dragonfly-manager.dragonfly-system.svc"
	managerPort    = "8080"

@@ -26,7 +26,4 @@ const (

	dragonflyNamespace = "dragonfly-system"
	e2eNamespace       = "dragonfly-e2e"

	proxy            = "localhost:65001"
	hostnameFilePath = "/etc/hostname"
)