Compare commits


141 Commits

Author SHA1 Message Date
renovate[bot] 1952271636 fix(deps): update patch digest dependencies 2025-07-13 06:13:23 +00:00
renovate[bot] 324bc217ea fix(deps): update minor dependencies 2025-07-13 06:05:28 +00:00
jinhong.kim0 f3729b8401 fix(deps): update patch digest dependencies
Signed-off-by: jinhong.kim0 <hookak25@gmail.com>
2025-07-11 15:58:30 +08:00
jinhong.kim0 734c3d1176 feat: add disk metrics API support
Signed-off-by: jinhong.kim0 <hookak25@gmail.com>
2025-07-11 15:58:30 +08:00
jinhong.kim0 ce049b97cc fix: update ReplicaRebuildStatus field name for type compatibility
Signed-off-by: jinhong.kim0 <hookak25@gmail.com>
2025-07-03 23:46:54 +08:00
Shuo Wu a041bc46f4 fix: update vendor longhorn/longhorn-spdk-engine
Longhorn 11188

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-07-03 08:21:15 +08:00
Derek Su fdca841434 chore(dockerfile): add cache buster to avoid using old dependencies
Longhorn 11110

Signed-off-by: Derek Su <derek.su@suse.com>
2025-07-02 14:26:55 +08:00
Derek Su 8e15bd425c chore(dockerfile): move libqcow to dedicated GitHub repo (longhorn/libqcow)
Longhorn 10988

Signed-off-by: Derek Su <derek.su@suse.com>
2025-06-30 17:03:25 +08:00
renovate[bot] 50930d65bf fix(deps): update patch digest dependencies 2025-06-29 05:18:36 +00:00
jinhong.kim0 5f689052fd feat(proxy): implement ReplicaRebuildingQosSet for SPDK replica rebuild QoS control
Longhorn 10770

Signed-off-by: jinhong.kim0 <hookak25@gmail.com>
2025-06-24 11:01:17 +08:00
Shuo Wu c427b13bbe fix: update vendor for v2 volume range hash
Longhorn 10037

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-06-23 12:45:48 -07:00
renovate[bot] 74c7baad7f fix(deps): update module k8s.io/mount-utils to v0.33.2 2025-06-22 06:16:25 +00:00
renovate[bot] a107a0eaa2 fix(deps): update patch digest dependencies 2025-06-15 15:00:02 +00:00
renovate[bot] 577a55621b fix(deps): update patch digest dependencies 2025-06-15 05:49:10 +00:00
renovate[bot] 8d1905d62a fix(deps): update minor dependencies 2025-06-08 14:23:21 +00:00
renovate[bot] 3ba929221e fix(deps): update module github.com/longhorn/longhorn-engine to v1.9.0 2025-06-08 14:16:09 +00:00
renovate[bot] a531d93f0f fix(deps): update patch digest dependencies 2025-06-08 14:08:36 +00:00
renovate[bot] a38019834c fix(deps): update patch digest dependencies 2025-05-25 15:32:25 +00:00
renovate[bot] cb2711e202 fix(deps): update module github.com/longhorn/longhorn-engine to v1.9.0-rc3 2025-05-25 15:20:27 +00:00
Derek Su 6d875af2e9 chore: update Makefile
Signed-off-by: Derek Su <derek.su@suse.com>
2025-05-20 13:27:15 +09:00
Raphanus Lo 955e286a73 feat: uuid restriction for deleting instances
Longhorn 10888

Signed-off-by: Raphanus Lo <yunchang.lo@suse.com>
2025-05-19 15:08:09 +09:00
Derek Su b1c2c80c6c feat: return UUID in instance Get and List operations
Longhorn 10888

Signed-off-by: Derek Su <derek.su@suse.com>
2025-05-19 15:08:09 +09:00
Derek Su 16cf30433f chore(vendor): update dependencies
Longhorn 10888

Signed-off-by: Derek Su <derek.su@suse.com>
2025-05-19 15:08:09 +09:00
Derek Su c7d17ca194 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-05-17 03:53:14 +00:00
renovate[bot] 8db773a1fe fix(deps): update patch digest dependencies 2025-05-17 03:53:14 +00:00
Damiano Cipriani cc1d9c758c chore(deps): update longhorn-spdk-engine version
Longhorn 10799

Signed-off-by: Damiano Cipriani <damiano.cipriani@suse.com>
2025-05-13 17:32:12 +09:00
Damiano Cipriani e68e922b20 chore(deps): update go-spdk-helper version
Longhorn 10799

Signed-off-by: Damiano Cipriani <damiano.cipriani@suse.com>
2025-05-13 17:32:12 +09:00
renovate[bot] e4763d501c fix(deps): update github.com/longhorn/go-spdk-helper digest to ed9e6ca 2025-05-11 13:32:13 +00:00
renovate[bot] 03a1e4b1af fix(deps): update module github.com/longhorn/longhorn-engine to v1.9.0-rc2 2025-05-11 11:15:08 +00:00
renovate[bot] 30955f0547 fix(deps): update patch digest dependencies 2025-05-11 06:13:30 +00:00
Derek Su f602f30e3c chore(Makefile): update SRC_BRANCH to master
Signed-off-by: Derek Su <derek.su@suse.com>
2025-05-07 18:14:46 +09:00
Derek Su 6b83e83e4c chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-05-07 07:06:55 +00:00
Derek Su d0abf2d84f chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-05-07 07:06:55 +00:00
Derek Su ea3fe8bdca fix: add List method for gRPC health check server
google.golang.org/grpc v1.72.0 introduces List method.

Signed-off-by: Derek Su <derek.su@suse.com>
2025-05-07 07:06:55 +00:00
renovate[bot] d460241253 fix(deps): update minor dependencies 2025-05-07 07:06:55 +00:00
renovate[bot] eb8aa6464b chore(deps): update dependency go to v1.24.3 2025-05-07 05:55:24 +00:00
renovate[bot] bcf805eaa6 fix(deps): update module github.com/longhorn/longhorn-engine to v1.9.0-rc1 2025-05-04 05:58:10 +00:00
renovate[bot] 14a99036e1 fix(deps): update module github.com/longhorn/longhorn-engine to v1.9.0-dev-20250420 2025-04-26 20:32:53 +08:00
Shuo Wu eb3f9b2c3c fix: update vendor for v2 snapshot hash bugs
Longhorn 10563, 10798

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-04-26 05:53:06 +08:00
David Ko 38723cb99e chore(version): update version file to v1.10.0-dev
Signed-off-by: David Ko <dko@suse.com>
2025-04-25 12:19:26 +00:00
renovate[bot] 77e3d2dd98 fix(deps): update github.com/longhorn/go-spdk-helper digest to bb5fe0a 2025-04-25 17:59:04 +08:00
Phan Le 333d3b9b8d fix: specify gcc13 for SPDK building
longhorn-10768

Signed-off-by: Phan Le <phan.le@suse.com>
2025-04-24 07:38:34 +08:00
Phan Le 654fb323a0 fix: no need to manually install liburing
We already specify the flag when building SPDK here
https://github.com/longhorn/dep-versions/blob/
d6a5694e59025d13a32bc2d86fe02d6ccfcb9ac6/scripts/build-spdk.sh#L42

longhorn-10768

Signed-off-by: Phan Le <phan.le@suse.com>
2025-04-23 08:44:50 +08:00
Derek Su 02bf564cf1 chore(Docker): use liburing2-devel for SPDK ublk
Signed-off-by: Derek Su <derek.su@suse.com>
2025-04-22 13:40:26 +08:00
Shuo Wu 3c87f4a9aa fix: v2 shallow copy check invocation err should not be considered as a rebuilding failure
Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-04-22 06:34:22 +08:00
Shuo Wu 69fb39c487 fix: update vendor for v2 snapshot hash cancel
Longhorn 10563

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-04-22 06:34:22 +08:00
renovate[bot] a58d78fc1d fix(deps): update github.com/longhorn/backupstore digest to 0ef762b 2025-04-21 05:02:50 +00:00
Derek Su 641c04ace3 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-04-21 11:27:21 +08:00
renovate[bot] 603d95c502 fix(deps): update patch digest dependencies 2025-04-20 05:21:48 +00:00
Phan Le f6ee9fca32 feat: implement ublk frontend for v2 data engine - part 2
Implement control plane part

longhorn-9456

Signed-off-by: Phan Le <phan.le@suse.com>
2025-04-17 11:57:32 +08:00
Phan Le 183d030748 fix: remove unnecessary commands and add gpg key verification back
1. No need to add the snappy repo for the build container
2. No need to print out the build script
3. Need to verify the gpg key when installing packages

longhorn-9456

Signed-off-by: Phan Le <phan.le@suse.com>
2025-04-16 07:57:06 +08:00
Phan Le de8d51c1d7 feat: implement ublk frontend for v2 data engine
longhorn-9456

Signed-off-by: Phan Le <phan.le@suse.com>
2025-04-15 16:19:45 +08:00
Derek Su 8b85df2d89 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-04-15 16:19:45 +08:00
renovate[bot] 5216a2d592 fix(deps): update github.com/longhorn/backupstore digest to f882c9e 2025-04-13 12:14:05 +00:00
renovate[bot] 15faccd4e1 fix(deps): update github.com/longhorn/go-spdk-helper digest to 52d1caf 2025-04-13 09:38:31 +00:00
renovate[bot] 1005a8e625 fix(deps): update patch digest dependencies 2025-04-13 06:10:39 +00:00
renovate[bot] 2d6da69c5c fix(deps): update module golang.org/x/net to v0.39.0 2025-04-13 06:02:14 +00:00
Shuo Wu 1e198cc322 feat: implement v2 proxy snapshot hash APIs
Longhorn 10480

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-04-11 10:19:30 +08:00
Shuo Wu 8feb9259d6 fix: update vendor for v2 snapshot hash APIs
Longhorn 10480

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-04-11 10:19:30 +08:00
Derek Su abf2cdc51f chore(workflow): add workflow_dispatch in build.yml
Longhorn 10701

Signed-off-by: Derek Su <derek.su@suse.com>
2025-04-11 09:20:59 +08:00
renovate[bot] cdb3a92e9f chore(deps): update registry.suse.com/bci/bci-base docker tag to v15.7 2025-04-06 07:04:32 +00:00
renovate[bot] 94bc9bccbc fix(deps): update module golang.org/x/sync to v0.13.0 2025-04-06 06:58:16 +00:00
renovate[bot] 2142da9855 fix(deps): update github.com/longhorn/go-spdk-helper digest to f54098d 2025-04-06 06:51:03 +00:00
Jack Lin aa68b9d4eb feat: rename the snapshot attribute used by longhorn backing image
ref: longhorn/longhorn 10399

Signed-off-by: Jack Lin <jack.lin@suse.com>
2025-04-02 15:33:04 +08:00
renovate[bot] 526332289f fix(deps): update module golang.org/x/net to v0.38.0 2025-04-01 19:24:05 +00:00
renovate[bot] 4ae37e0b4f fix(deps): update patch digest dependencies 2025-04-01 19:16:52 +00:00
Derek Su 80af2398f3 fix: fix validation errors
Longhorn 10653

Signed-off-by: Derek Su <derek.su@suse.com>
2025-04-01 12:08:40 -07:00
Derek Su 4df50ad888 fix(Dockerfile): use registry.suse.com/bci/bci-base:15.6 instead
registry.suse.com/bci/bci-base:15.7 is in Tech Preview state.

Longhorn 10653

Signed-off-by: Derek Su <derek.su@suse.com>
2025-04-01 12:08:40 -07:00
Derek Su cd0ac50b42 chore(workflow): remove .coderabbit.yaml
Longhorn 10663

Signed-off-by: Derek Su <derek.su@suse.com>
2025-03-31 16:10:13 +08:00
Shuo Wu a341ecb5f2 fix: dockerfile misses some deps in /usr/lib64
Longhorn 10653

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-03-28 12:33:33 +08:00
renovate[bot] c394ffb5b6 fix(deps): update patch digest dependencies 2025-03-23 06:36:16 +00:00
renovate[bot] d1f1886410 fix(deps): update patch digest dependencies 2025-03-15 06:43:17 +00:00
renovate[bot] 325e120f3d chore(deps): update registry.suse.com/bci/bci-base docker tag to v15.7 2025-03-15 06:36:10 +00:00
Derek Su e930865728 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-03-13 14:38:05 +08:00
Derek Su 2e64165788 fix: reorganize imports
Signed-off-by: Derek Su <derek.su@suse.com>
2025-03-13 14:38:05 +08:00
jinhong.kim0 bca6fe8d89 feat: implement MetricsGet to support V1 and V2 data engines in ProxyOps
longhorn 10472

Signed-off-by: jinhong.kim0 <hookak25@gmail.com>
2025-03-13 14:38:05 +08:00
Chin-Ya Huang 69a5641571 fix(build): devel:languages:python:backports.repo not found on medium
Signed-off-by: Chin-Ya Huang <chin-ya.huang@suse.com>
2025-03-13 13:25:14 +08:00
renovate[bot] 23ebf71fe1 fix(deps): update patch digest dependencies 2025-03-13 04:28:58 +00:00
Derek Su a41cede8cb chore(Dockerfile): retry zypper commands if they fail
Longhorn component image builds often fail due to the instability
of the zypper repository. To mitigate this issue, we can add retries
to the zypper commands in the Dockerfile of each component image.

Longhorn 10573

Signed-off-by: Derek Su <derek.su@suse.com>
2025-03-12 14:27:18 +08:00
Derek Su 1d0941f73c chore(Dockerfile): use latest golangci-lint
Signed-off-by: Derek Su <derek.su@suse.com>
2025-03-12 14:27:18 +08:00
Derek Su 12b5e729ab chore(Dockerfile): revert registry.suse.com/bci/bci-base to 15.6
registry.suse.com/bci/bci-base 15.7 is still in Tech Preview stage.

Signed-off-by: Derek Su <derek.su@suse.com>
2025-03-12 14:27:18 +08:00
renovate[bot] e8f0c8ca54 chore(deps): update registry.suse.com/bci/bci-base docker tag to v15.7 2025-03-11 01:11:56 +00:00
renovate[bot] 76069e1e1c fix(deps): update patch digest dependencies 2025-03-11 01:05:28 +00:00
Derek Su 51e5b6bc3f chore(Dockerfile): revert registry.suse.com/bci/bci-base to 15.6
registry.suse.com/bci/bci-base 15.7 is still in Tech Preview.

Signed-off-by: Derek Su <derek.su@suse.com>
2025-03-11 00:00:41 +00:00
Derek Su 9cc0e648b1 chore(Dockerfile): update golangci-lint to v1.64.6
Signed-off-by: Derek Su <derek.su@suse.com>
2025-03-11 00:00:41 +00:00
renovate[bot] e0f0dfa8cf chore(deps): update docker dependencies 2025-03-11 00:00:41 +00:00
renovate[bot] c8f17f6323 fix(deps): update minor dependencies 2025-03-08 06:00:19 +00:00
renovate[bot] a1cba0dbc6 fix(deps): update patch digest dependencies 2025-03-08 05:46:26 +00:00
renovate[bot] b4de77ef22 fix(deps): update patch digest dependencies 2025-03-01 03:09:04 +00:00
Derek Su 750c0a4703 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-02-26 08:29:39 +08:00
Derek Su 28c911e9e0 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-02-19 14:33:30 +08:00
Raphanus Lo f1f1367793 fix(ci): fixed project name
Signed-off-by: Raphanus Lo <yunchang.lo@suse.com>
2025-02-17 22:49:04 +08:00
Raphanus Lo 6adfef8265 fix(ci): fix binary dependency for local build
Signed-off-by: Raphanus Lo <yunchang.lo@suse.com>
2025-02-17 15:29:41 +08:00
Raphanus Lo 5de8211c4e feat: self-contained container image build
Signed-off-by: Raphanus Lo <yunchang.lo@suse.com>
2025-02-17 12:12:56 +08:00
renovate[bot] 11aa2f0e6f fix(deps): update patch digest dependencies 2025-02-15 06:05:50 +00:00
renovate[bot] 4b876b4277 chore(deps): update dependency go to v1.24.0 2025-02-15 05:49:35 +00:00
Derek Su 1c6cc0d740 chore(build): move common functions to longhorn/dep-versions
Longhorn 10208

Signed-off-by: Derek Su <derek.su@suse.com>
2025-02-12 12:48:41 +08:00
Derek Su a8e2cbe17f chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-02-11 18:24:08 +08:00
Derek Su 2d17049d80 chore(build): support tag in longhorn/dep-versions
Longhorn 10208

Signed-off-by: Derek Su <derek.su@suse.com>
2025-02-11 08:50:17 +08:00
renovate[bot] e3164dfb18 fix(deps): update github.com/longhorn/backupstore digest to c552364 2025-02-09 10:21:18 +00:00
Shuo Wu feced3ddab fix: proxy add tcp prefix for v2 engine API ReplicaList returning address
Longhorn 10347

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-02-07 14:07:59 +08:00
Derek Su feb52fedc2 chore(build): remove unused RUN command
Longhorn 10208

Signed-off-by: Derek Su <derek.su@suse.com>
2025-02-07 13:21:00 +08:00
Derek Su cdabff0e65 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-02-07 13:01:13 +08:00
renovate[bot] 6fdbe04c25 fix(deps): update patch digest dependencies 2025-02-07 02:38:06 +00:00
renovate[bot] 99678e56f3 fix(deps): update module golang.org/x/sync to v0.11.0 2025-02-07 02:22:31 +00:00
Derek Su b000b0d430 chore(build): move deps build codes to longhorn/dep-versions
- move deps build codes to longhorn/dep-versions
- update workflows/build.yml

Longhorn 10208

Signed-off-by: Derek Su <derek.su@suse.com>
2025-02-06 13:05:19 +08:00
Shuo Wu 0a4a5406e1 fix: proxy remove tcp prefix for v2 replica remove
Longhorn 10335

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2025-02-05 08:23:32 +00:00
Derek Su 1a1da463af chore: versions of dependent packages are centrally managed in longhorn/dep-versions
Longhorn 10208

Signed-off-by: Derek Su <derek.su@suse.com>
2025-02-05 02:30:06 +00:00
renovate[bot] 21d560e364 fix(deps): update module google.golang.org/protobuf to v1.36.4 2025-01-26 10:38:33 +00:00
renovate[bot] 7614a8ed26 fix(deps): update module google.golang.org/grpc to v1.70.0 2025-01-26 10:22:59 +00:00
renovate[bot] 82374c34dc fix(deps): update module github.com/longhorn/longhorn-engine to v1.8.0 2025-01-26 10:08:37 +00:00
renovate[bot] 7218758f30 fix(deps): update module github.com/longhorn/longhorn-engine to v1.8.0-rc4 2025-01-18 05:29:11 +00:00
renovate[bot] 0d64550c84 chore(deps): update dependency go to v1.23.5 2025-01-18 05:14:55 +00:00
renovate[bot] 91d6dadfa1 fix(deps): update module k8s.io/mount-utils to v0.32.1 2025-01-16 09:06:57 +00:00
Derek Su b52f057835 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-01-16 08:51:17 +00:00
renovate[bot] b387fc3fce fix(deps): update module github.com/longhorn/longhorn-engine to v1.8.0-rc3 2025-01-16 08:51:17 +00:00
Derek Su 2d0a2ece38 chore(vendor): update dependencies
Longhorn 10212

Signed-off-by: Derek Su <derek.su@suse.com>
2025-01-15 12:24:11 +08:00
Jack Lin 7b00c7226c fix: skip writing zero when dumping data to backing image lvol
ref: longhorn/longhorn 9876

Signed-off-by: Jack Lin <jack.lin@suse.com>
2025-01-13 06:15:50 +00:00
Derek Su eba0eb8782 chore: update spdk to 1975133
Signed-off-by: Derek Su <derek.su@suse.com>
2025-01-13 13:51:03 +08:00
renovate[bot] 2d59975e04 fix(deps): update module golang.org/x/net to v0.34.0 2025-01-11 02:49:17 +00:00
renovate[bot] c4e44c6312 fix(deps): update patch digest dependencies 2025-01-11 02:33:19 +00:00
Damiano Cipriani 2da0a00e3b chore(Dockerfile): update spdk version
Longhorn 10140

Signed-off-by: Damiano Cipriani <damiano.cipriani@suse.com>
2025-01-11 03:13:58 +08:00
Damiano Cipriani b96535522f chore(Dockerfile): update go-spdk-helper version
longhorn 9922

Signed-off-by: Damiano Cipriani <damiano.cipriani@suse.com>
2025-01-10 13:29:40 +08:00
Chin-Ya Huang 57958291ee chore: update vendor
longhorn/longhorn-10160

Signed-off-by: Chin-Ya Huang <chin-ya.huang@suse.com>
2025-01-09 07:17:42 +00:00
Derek Su f6f09c9bf8 chore(workflow): update arm64 runner to longhorn-infra-oracle-arm64-runners
Longhorn 10145

Signed-off-by: Derek Su <derek.su@suse.com>
2025-01-07 21:38:20 +08:00
Derek Su e849415703 chore(build): refresh repos
Signed-off-by: Derek Su <derek.su@suse.com>
2025-01-07 17:07:14 +08:00
Derek Su 0296019264 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2025-01-07 17:07:14 +08:00
David Ko abe2471133 chore(version): update version file to v1.9.0-dev
Signed-off-by: David Ko <dko@suse.com>
2025-01-07 07:48:10 +00:00
Derek Su 98eb5b2a4e chore(vendor): update dependencies
Longhorn 10116

Signed-off-by: Derek Su <derek.su@suse.com>
2025-01-03 10:35:26 +08:00
David Ko 88fb9e0202 chore(action): create backport-pr.yml
Signed-off-by: David Ko <dko@suse.com>
2025-01-02 18:29:08 +08:00
Derek Su 82efffda7d chore(vendor): update longhorn-spdk-engine
Signed-off-by: Derek Su <derek.su@suse.com>
2025-01-01 08:12:03 +00:00
renovate[bot] 511b897bdc fix(deps): update module github.com/longhorn/longhorn-engine to v1.8.0-rc2 2025-01-01 08:12:03 +00:00
renovate[bot] e63b68bb6c fix(deps): update module github.com/longhorn/longhorn-engine to v1.8.0-rc1 2024-12-28 04:48:48 +00:00
Derek Su 4c1a672e37 chore(vendor): update dependencies
Signed-off-by: Derek Su <derek.su@suse.com>
2024-12-27 16:32:23 +00:00
renovate[bot] a09ea8654b fix(deps): update patch digest dependencies 2024-12-27 16:32:23 +00:00
Derek Su 8cab29bacd chore(vendor): update dependencies
Longhorn 9953

Signed-off-by: Derek Su <derek.su@suse.com>
2024-12-27 14:00:32 +08:00
Derek Su e5c0a2fb31 chore(vendor): update dependencies
Longhorn 9953

Signed-off-by: Derek Su <derek.su@suse.com>
2024-12-27 11:40:32 +08:00
Shuo Wu d31f0b5a6e fix: update go_spdk_helper commit ID for dapper file
Longhorn 5573

Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2024-12-27 10:13:30 +08:00
Derek Su 103393fef5 chore(vendor): update dependencies
Longhorn 10033

Signed-off-by: Derek Su <derek.su@suse.com>
2024-12-26 14:41:25 +08:00
renovate[bot] 383fb2241b fix(deps): update module github.com/longhorn/longhorn-engine to v1.8.0-rc1 2024-12-21 04:55:08 +00:00
Damiano Cipriani bc15511d0b fix(deps): update deps for SPDK snapshot checksum
Dockerfile has been updated to point at SPDK commit ID with
snapshot checksum. Also longhorn-spdk-engine has been updated.

Longhorn 5573

Signed-off-by: Damiano Cipriani <damiano.cipriani@suse.com>
2024-12-20 16:40:24 +08:00
398 changed files with 20533 additions and 13561 deletions

.coderabbit.yaml

@ -1,23 +0,0 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
request_changes_workflow: false
high_level_summary: false
poem: false
review_status: true
collapse_walkthrough: false
auto_review:
enabled: true
ignore_title_keywords:
- "WIP"
- "DO NOT MERGE"
drafts: false
base_branches:
- master
path_filters:
- "!vendor/**"
- "!go.mod"
- "!go.sum"
chat:
auto_reply: true

.github/workflows/backport-pr.yml vendored Normal file

@ -0,0 +1,12 @@
name: Link-Backport-PR-Issue
on:
pull_request:
types: [opened]
branches:
- master
- "v*"
jobs:
call-workflow:
uses: longhorn/longhorn/.github/workflows/backport-pr.yml@master

.github/workflows/build.yml

@ -7,7 +7,50 @@ on:
tags:
- v*
pull_request:
workflow_dispatch:
jobs:
build_info:
name: Collect build info
runs-on: ubuntu-latest
outputs:
version_major: ${{ steps.build_info.outputs.version_major }}
version_minor: ${{ steps.build_info.outputs.version_minor }}
version_patch: ${{ steps.build_info.outputs.version_patch }}
image_tag: ${{ steps.build_info.outputs.image_tag }}
steps:
- id: build_info
name: Declare build info
run: |
version_major=''
version_minor=''
version_patch=''
image_tag=''
branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}
ref=${{ github.ref }}
if [[ "$ref" =~ 'refs/tags/' ]]; then
version=$(sed -E 's/^v([0-9]*\.[0-9]*\.[0-9]*).*$/\1/' <<<${{ github.ref_name }} )
version_major=$(cut -d. -f1 <<<$version)
version_minor=$(cut -d. -f2 <<<$version)
version_patch=$(cut -d. -f3 <<<$version)
image_tag=${{ github.ref_name }}
elif [[ "$ref" =~ 'refs/heads/' ]]; then
image_tag="${branch}-head"
fi
echo "version_major=${version_major}" >>$GITHUB_OUTPUT
echo "version_minor=${version_minor}" >>$GITHUB_OUTPUT
echo "version_patch=${version_patch}" >>$GITHUB_OUTPUT
echo "image_tag=${image_tag}" >>$GITHUB_OUTPUT
cat <<EOF
version_major=${version_major}
version_minor=${version_minor}
version_patch=${version_patch}
image_tag=${image_tag}
EOF
build-amd64-binaries:
name: Build AMD64 binaries
runs-on: ubuntu-latest
@ -17,7 +60,7 @@ jobs:
# Build binaries
- name: Run make ci
run: make ci
run: SKIP_TASKS=package make ci
- uses: codecov/codecov-action@v4
with:
@ -33,7 +76,7 @@ jobs:
build-arm64-binaries:
name: Build ARM64 binaries
runs-on: longhorn-infra-arm64-runners
runs-on: longhorn-infra-oracle-arm64-runners
steps:
- name: Install make curl git
run: |
@ -45,7 +88,7 @@ jobs:
# Build binaries
- name: Run make ci
run: sudo make ci
run: sudo SKIP_TASKS=package make ci
- name: Upload binaries
uses: actions/upload-artifact@v4
@ -57,7 +100,7 @@ jobs:
name: Build and push AMD64 images
runs-on: ubuntu-latest
if: ${{ startsWith(github.ref, 'refs/heads/') || startsWith(github.ref, 'refs/tags/') }}
needs: build-amd64-binaries
needs: [build_info, build-amd64-binaries]
steps:
- name: Checkout code
uses: actions/checkout@v4
@ -67,10 +110,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Declare branch
run: |
echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_ENV"
- name: Download binaries
uses: actions/download-artifact@v4
with:
@ -81,10 +120,6 @@ jobs:
run: |
chmod +x ./bin/*
- name: Copy bin folder to package
run: |
cp -r ./bin ./package/
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
@ -92,37 +127,24 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
# longhornio/longhorn-instance-manager image
- name: docker-publish-longhorn-instance-manager
if: ${{ startsWith(github.ref, 'refs/heads/') }}
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64
tags: longhornio/longhorn-instance-manager:${{ env.branch }}-head-amd64
file: package/Dockerfile
sbom: true
build-args: |
ARCH=amd64
- name: docker-publish-longhorn-instance-manager-with-tag
if: ${{ startsWith(github.ref, 'refs/tags/') }}
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64
tags: longhornio/longhorn-instance-manager:${{ github.ref_name }}-amd64
file: package/Dockerfile
sbom: true
build-args: |
ARCH=amd64
- name: Build and publish image
env:
REPO: docker.io/longhornio
TAG: ${{ needs.build_info.outputs.image_tag }}-amd64
TARGET_PLATFORMS: linux/amd64
run: make workflow-image-build-push
build-push-arm64-images:
name: Build and push ARM64 images
runs-on: longhorn-infra-arm64-runners
runs-on: longhorn-infra-oracle-arm64-runners
if: ${{ startsWith(github.ref, 'refs/heads/') || startsWith(github.ref, 'refs/tags/') }}
needs: build-arm64-binaries
needs: [build_info, build-arm64-binaries]
steps:
- name: Install make curl git
run: |
sudo apt update
sudo apt-get -y install make curl git
- name: Checkout code
uses: actions/checkout@v4
@ -131,9 +153,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Declare branch and sha_short
run: |
echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_ENV"
- name: Download binaries
uses: actions/download-artifact@v4
with:
@ -144,10 +163,6 @@ jobs:
run: |
chmod +x ./bin/*
- name: Copy bin folder to package
run: |
cp -r ./bin ./package/
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
@ -155,48 +170,22 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
# longhornio/longhorn-instance-manager image
- name: docker-publish-longhorn-instance-manager
if: ${{ startsWith(github.ref, 'refs/heads/') }}
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/arm64
tags: longhornio/longhorn-instance-manager:${{ env.branch }}-head-arm64
file: package/Dockerfile
sbom: true
build-args: |
ARCH=arm64
- name: docker-publish-longhorn-instance-manager-with-tag
if: ${{ startsWith(github.ref, 'refs/tags/') }}
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/arm64
tags: longhornio/longhorn-instance-manager:${{ github.ref_name }}-arm64
file: package/Dockerfile
sbom: true
build-args: |
ARCH=arm64
- name: Build and publish image
env:
REPO: docker.io/longhornio
TAG: ${{ needs.build_info.outputs.image_tag }}-arm64
TARGET_PLATFORMS: linux/arm64
run: make workflow-image-build-push
manifest-image:
name: Manifest images
runs-on: ubuntu-latest
needs: [build-push-amd64-images, build-push-arm64-images]
needs: [build_info, build-push-amd64-images, build-push-arm64-images]
if: ${{ startsWith(github.ref, 'refs/heads/') || startsWith(github.ref, 'refs/tags/') }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Declare branch and sha_short
run: |
echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_ENV"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
@ -204,19 +193,8 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
# longhornio/longhorn-instance-manager image
- name: docker-pull-longhorn-instance-manager
if: ${{ startsWith(github.ref, 'refs/heads/') }}
run: |
docker pull --platform linux/amd64 longhornio/longhorn-instance-manager:${{ env.branch }}-head-amd64
docker pull --platform linux/arm64 longhornio/longhorn-instance-manager:${{ env.branch }}-head-arm64
docker buildx imagetools create -t longhornio/longhorn-instance-manager:${{ env.branch }}-head \
longhornio/longhorn-instance-manager:${{ env.branch }}-head-amd64 \
longhornio/longhorn-instance-manager:${{ env.branch }}-head-arm64
- name: docker-pull-longhorn-instance-manager-with-tag
if: ${{ startsWith(github.ref, 'refs/tags/') }}
run: |
docker pull --platform linux/amd64 longhornio/longhorn-instance-manager:${{ github.ref_name }}-amd64
docker pull --platform linux/arm64 longhornio/longhorn-instance-manager:${{ github.ref_name }}-arm64
docker buildx imagetools create -t longhornio/longhorn-instance-manager:${{ github.ref_name }} \
longhornio/longhorn-instance-manager:${{ github.ref_name }}-amd64 \
longhornio/longhorn-instance-manager:${{ github.ref_name }}-arm64
- name: docker-pull-manifest-longhorn-instance-manager
env:
REPO: docker.io/longhornio
TAG: ${{ needs.build_info.outputs.image_tag }}
run: make workflow-manifest-image
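
To make the new build_info outputs concrete, here is a hedged sketch of the tag derivation that step performs; the sample ref name is illustrative, not taken from this repository:

# For a tag push such as refs/tags/v1.10.0-rc1 (hypothetical tag name):
ref_name=v1.10.0-rc1
version=$(sed -E 's/^v([0-9]*\.[0-9]*\.[0-9]*).*$/\1/' <<<"$ref_name")   # 1.10.0
echo "major=$(cut -d. -f1 <<<$version) minor=$(cut -d. -f2 <<<$version) patch=$(cut -d. -f3 <<<$version)"
# -> major=1 minor=10 patch=0, image_tag=v1.10.0-rc1, so the per-arch jobs push
#    longhorn-instance-manager:v1.10.0-rc1-amd64 / -arm64 and the manifest job merges them.
# For a branch push (refs/heads/master), only image_tag is set, to master-head.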

.github/workflows/scripts/build.sh vendored Normal file

@ -0,0 +1,29 @@
#!/bin/bash
function convert_version_to_major_minor_x() {
local version="$1"
if [[ "$version" =~ ^v([0-9]+)\.([0-9]+)\. ]]; then
echo "v${BASH_REMATCH[1]}.${BASH_REMATCH[2]}.x"
else
echo "Invalid version format: $version"
fi
}
function get_branch() {
local version_file="version"
if [[ ! -f $version_file ]]; then
echo "Error: Version file '$version_file' not found."
exit 1
fi
local version=$(cat "$version_file")
local branch=$(convert_version_to_major_minor_x "$version")
# Fetch versions.json from the appropriate branch, fallback to main
wget -q "https://raw.githubusercontent.com/longhorn/dep-versions/${branch}/versions.json" -O versions.json
if [ $? -eq 0 ]; then
echo "${branch}"
else
echo "main"
fi
}
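
A minimal usage sketch of the helper above, assuming it is sourced from the repository root and that the version file currently contains v1.10.0-dev (as set elsewhere in this range):

source .github/workflows/scripts/build.sh
get_branch
# -> prints "v1.10.x" when https://raw.githubusercontent.com/longhorn/dep-versions/v1.10.x/versions.json
#    can be fetched (it is also downloaded to ./versions.json as a side effect), otherwise prints "main"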

Dockerfile.dapper

@ -1,43 +1,58 @@
FROM registry.suse.com/bci/golang:1.23
FROM registry.suse.com/bci/golang:1.24
ARG DAPPER_HOST_ARCH
ARG http_proxy
ARG https_proxy
ARG SRC_BRANCH=master
ARG SRC_TAG
ARG CACHEBUST
ENV HOST_ARCH=${DAPPER_HOST_ARCH} ARCH=${DAPPER_HOST_ARCH}
ENV DAPPER_DOCKER_SOCKET true
ENV DAPPER_ENV TAG REPO DRONE_REPO DRONE_PULL_REQUEST DRONE_COMMIT_REF
ENV DAPPER_ENV TAG REPO DRONE_REPO DRONE_PULL_REQUEST DRONE_COMMIT_REF SKIP_TASKS
ENV DAPPER_OUTPUT bin coverage.out
ENV DAPPER_RUN_ARGS --privileged --tmpfs /go/src/github.com/longhorn/longhorn-engine/integration/.venv:exec --tmpfs /go/src/github.com/longhorn/longhorn-engine/integration/.tox:exec -v /dev:/host/dev -v /proc:/host/proc
ENV DAPPER_SOURCE /go/src/github.com/longhorn/longhorn-instance-manager
ENV GOLANGCI_LINT_VERSION="v1.60.3"
ENV SRC_BRANCH ${SRC_BRANCH}
ENV SRC_TAG ${SRC_TAG}
WORKDIR ${DAPPER_SOURCE}
ENTRYPOINT ["./scripts/entry"]
CMD ["ci"]
RUN zypper refresh && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/system:/snappy/SLE_15/system:snappy.repo && \
zypper -n --gpg-auto-import-keys ref
RUN zypper -n ref && \
zypper update -y
# Install packages
RUN zypper -n install cmake wget curl git less file \
libglib-2_0-0 libkmod-devel libnl3-devel linux-glibc-devel pkg-config \
psmisc tox qemu-tools fuse python3-devel git zlib-devel zlib-devel-static \
psmisc tox qemu-tools fuse python3-devel zlib-devel zlib-devel-static \
bash-completion rdma-core-devel libibverbs xsltproc docbook-xsl-stylesheets \
perl-Config-General libaio-devel glibc-devel-static glibc-devel iptables libltdl7 libdevmapper1_03 iproute2 jq docker gcc gcc-c++ && \
perl-Config-General libaio-devel glibc-devel-static glibc-devel iptables libltdl7 \
libdevmapper1_03 iproute2 jq docker gcc gcc-c++ automake gettext gettext-tools libtool && \
rm -rf /var/cache/zypp/*
# Install golangci-lint
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin ${GOLANGCI_LINT_VERSION}
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest
# If TAG is explicitly set and exists in the repo, switch to the tag
RUN git clone https://github.com/longhorn/dep-versions.git -b ${SRC_BRANCH} /usr/src/dep-versions && \
cd /usr/src/dep-versions && \
if [ -n "${SRC_TAG}" ] && git show-ref --tags ${SRC_TAG} > /dev/null 2>&1; then \
echo "Checking out tag ${SRC_TAG}"; \
cd /usr/src/dep-versions && git checkout tags/${SRC_TAG}; \
fi
# Install libqcow to resolve error:
# vendor/github.com/longhorn/longhorn-engine/pkg/qcow/libqcow.go:6:11: fatal error: libqcow.h: No such file or directory
RUN curl -sSfL https://s3-us-west-1.amazonaws.com/rancher-longhorn/libqcow-alpha-20181117.tar.gz | tar xvzf - -C /usr/src && \
cd /usr/src/libqcow-20181117 && \
./configure && \
make -j$(nproc) && \
make install && \
RUN export REPO_OVERRIDE="" && \
export COMMIT_ID_OVERRIDE="" && \
bash /usr/src/dep-versions/scripts/build-libqcow.sh "${REPO_OVERRIDE}" "${COMMIT_ID_OVERRIDE}" && \
ldconfig
# Docker Buildx: The docker version in dapper is too old to have buildx. Install it manually.
RUN curl -sSfLO https://github.com/docker/buildx/releases/download/v0.13.1/buildx-v0.13.1.linux-${ARCH} && \
chmod +x buildx-v0.13.1.linux-${ARCH} && \
mv buildx-v0.13.1.linux-${ARCH} /usr/local/bin/buildx

Makefile

@ -1,4 +1,15 @@
PROJECT := longhorn-instance-manager
TARGETS := $(shell ls scripts)
MACHINE := longhorn
# Define the target platforms that can be used across the ecosystem.
# Note that what would actually be used for a given project will be
# defined in TARGET_PLATFORMS, and must be a subset of the below:
DEFAULT_PLATFORMS := linux/amd64,linux/arm64
export SRC_BRANCH := $(shell bash -c 'source <(curl -s "https://raw.githubusercontent.com/longhorn/dep-versions/master/scripts/common.sh") && get_branch')
export SRC_TAG := $(shell git tag --points-at HEAD | head -n 1)
export CACHEBUST := $(shell date +%s)
.dapper:
@echo Downloading dapper
@ -10,6 +21,28 @@ TARGETS := $(shell ls scripts)
$(TARGETS): .dapper
./.dapper $@
.PHONY: buildx-machine
buildx-machine:
@docker buildx create --name=$(MACHINE) --platform=$(DEFAULT_PLATFORMS) 2>/dev/null || true
docker buildx inspect $(MACHINE)
# variables needed from GHA caller:
# - REPO: image repo, include $registry/$repo_path
# - TAG: image tag
# - TARGET_PLATFORMS: optional, to be passed for buildx's --platform option
# - IID_FILE_FLAG: optional, options to generate image ID file
.PHONY: workflow-image-build-push workflow-image-build-push-secure workflow-manifest-image
workflow-image-build-push: buildx-machine
MACHINE=$(MACHINE) PUSH='true' IMAGE_NAME=$(PROJECT) bash scripts/package
workflow-image-build-push-secure: buildx-machine
MACHINE=$(MACHINE) PUSH='true' IMAGE_NAME=$(PROJECT) IS_SECURE=true bash scripts/package
workflow-manifest-image:
docker pull --platform linux/amd64 ${REPO}/longhorn-instance-manager:${TAG}-amd64
docker pull --platform linux/arm64 ${REPO}/longhorn-instance-manager:${TAG}-arm64
docker buildx imagetools create -t ${REPO}/longhorn-instance-manager:${TAG} \
${REPO}/longhorn-instance-manager:${TAG}-amd64 \
${REPO}/longhorn-instance-manager:${TAG}-arm64
trash: .dapper
./.dapper -m bind trash
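
For context, the new workflow-* targets are what the GitHub Actions jobs above invoke; a hedged local sketch with an illustrative tag (Docker Hub credentials and a buildx-capable Docker are assumed):

REPO=docker.io/longhornio TAG=v1.10.0-rc1-amd64 TARGET_PLATFORMS=linux/amd64 make workflow-image-build-push
REPO=docker.io/longhornio TAG=v1.10.0-rc1-arm64 TARGET_PLATFORMS=linux/arm64 make workflow-image-build-push
REPO=docker.io/longhornio TAG=v1.10.0-rc1 make workflow-manifest-image   # stitches the -amd64/-arm64 images together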


@ -57,9 +57,13 @@ func createProcess(c *cli.Context) error {
cli, err := getProcessManagerClient(c, ctx, cancel)
if err != nil {
return errors.Wrap(err, "failed to initialize client")
return errors.Wrap(err, "failed to initialize ProcessManager client")
}
defer cli.Close()
defer func() {
if closeErr := cli.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close ProcessManager client")
}
}()
process, err := cli.ProcessCreate(c.String("name"), c.String("binary"),
c.Int("port-count"), c.Args(), c.StringSlice("port-args"))
@ -76,6 +80,11 @@ func ProcessDeleteCmd() cli.Command {
cli.StringFlag{
Name: "name",
},
cli.StringFlag{
Name: "uuid",
Required: false,
Usage: "Validate the process UUID. If provided, the process will be deleted only when both name and UUID are matched.",
},
},
Action: func(c *cli.Context) {
if err := deleteProcess(c); err != nil {
@ -91,11 +100,15 @@ func deleteProcess(c *cli.Context) error {
cli, err := getProcessManagerClient(c, ctx, cancel)
if err != nil {
return errors.Wrap(err, "failed to initialize client")
return errors.Wrap(err, "failed to initialize ProcessManager client")
}
defer cli.Close()
defer func() {
if closeErr := cli.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close ProcessManager client")
}
}()
process, err := cli.ProcessDelete(c.String("name"))
process, err := cli.ProcessDelete(c.String("name"), c.String("uuid"))
if err != nil {
return errors.Wrap(err, "failed to delete process")
}
@ -123,9 +136,13 @@ func getProcess(c *cli.Context) error {
defer cancel()
cli, err := getProcessManagerClient(c, ctx, cancel)
if err != nil {
return errors.Wrap(err, "failed to initialize client")
return errors.Wrap(err, "failed to initialize ProcessManager client")
}
defer cli.Close()
defer func() {
if closeErr := cli.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close ProcessManager client")
}
}()
process, err := cli.ProcessGet(c.String("name"))
if err != nil {
@ -151,9 +168,13 @@ func listProcess(c *cli.Context) error {
defer cancel()
cli, err := getProcessManagerClient(c, ctx, cancel)
if err != nil {
return errors.Wrap(err, "failed to initialize client")
return errors.Wrap(err, "failed to initialize ProcessManager client")
}
defer cli.Close()
defer func() {
if closeErr := cli.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close ProcessManager client")
}
}()
processes, err := cli.ProcessList()
if err != nil {
@ -198,9 +219,13 @@ func replaceProcess(c *cli.Context) error {
defer cancel()
cli, err := getProcessManagerClient(c, ctx, cancel)
if err != nil {
return errors.Wrap(err, "failed to initialize client")
return errors.Wrap(err, "failed to initialize ProcessManager client")
}
defer cli.Close()
defer func() {
if closeErr := cli.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close ProcessManager client")
}
}()
process, err := cli.ProcessReplace(c.String("name"), c.String("binary"),
c.Int("port-count"), c.Args(), c.StringSlice("port-args"), c.String("terminate-signal"))
@ -223,7 +248,7 @@ func getProcessManagerClient(c *cli.Context, ctx context.Context, ctxCancel cont
if err == nil {
return imClient, err
}
logrus.WithError(err).Info("Falling back to non tls client")
logrus.WithError(err).Info("Falling back to non tls ProcessManager client")
}
return client.NewProcessManagerClient(ctx, ctxCancel, url, nil)
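
The new --uuid flag makes deletion conditional on a UUID match. A hedged sketch of how it might be passed on the command line; the subcommand name, process name, and UUID below are illustrative assumptions, not taken from this diff:

# assumed subcommand layout; only --name and --uuid are confirmed by the change above
longhorn-instance-manager process-delete --name pvc-example-r-0 --uuid 8a1f0c2e-0000-0000-0000-000000000000
# with --uuid supplied, the process is deleted only when both the name and the UUID match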


@ -43,9 +43,13 @@ func version(c *cli.Context) error {
if !c.Bool("client-only") {
cli, err := getProcessManagerClient(c, ctx, cancel)
if err != nil {
return errors.Wrap(err, "failed to initialize client")
return errors.Wrap(err, "failed to initialize ProcessManagerClient")
}
defer cli.Close()
defer func() {
if closeErr := cli.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close ProcessManagerClient")
}
}()
version, err := cli.VersionGet()
if err != nil {

go.mod

@ -1,26 +1,26 @@
module github.com/longhorn/longhorn-instance-manager
go 1.23.0
go 1.24.0
toolchain go1.23.4
toolchain go1.24.5
require (
github.com/google/uuid v1.6.0
github.com/longhorn/backupstore v0.0.0-20241218004307-b75ee1322085
github.com/longhorn/go-common-libs v0.0.0-20241218045609-c76c9e237a7d
github.com/longhorn/go-spdk-helper v0.0.0-20241218003628-6c9c87b76aa0
github.com/longhorn/longhorn-engine v1.8.0-dev-20241215
github.com/longhorn/longhorn-spdk-engine v0.0.0-20241219055252-d82cf2ad63bc
github.com/longhorn/types v0.0.0-20241217083824-2e0ecb487ccf
github.com/longhorn/backupstore v0.0.0-20250624115502-f6e828377c27
github.com/longhorn/go-common-libs v0.0.0-20250712065607-11215ac4de96
github.com/longhorn/go-spdk-helper v0.0.3-0.20250712161648-42d38592f838
github.com/longhorn/longhorn-engine v1.9.0
github.com/longhorn/longhorn-spdk-engine v0.0.0-20250702033138-07866abda0ed
github.com/longhorn/types v0.0.0-20250710112743-e3a1e9e2a9c1
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.9.3
github.com/urfave/cli v1.22.16
golang.org/x/net v0.33.0
golang.org/x/sync v0.10.0
google.golang.org/grpc v1.69.2
google.golang.org/protobuf v1.36.0
github.com/urfave/cli v1.22.17
golang.org/x/net v0.42.0
golang.org/x/sync v0.16.0
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
k8s.io/mount-utils v0.32.0
k8s.io/mount-utils v0.33.2
)
require (
@ -29,12 +29,12 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 // indirect
github.com/RoaringBitmap/roaring v1.9.4 // indirect
github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/aws/aws-sdk-go v1.55.7 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.16.0 // indirect
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@ -49,21 +49,20 @@ require (
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/gorilla/handlers v1.5.2 // indirect
github.com/jinzhu/copier v0.4.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/longhorn/go-iscsi-helper v0.0.0-20241208120713-c4ac270fbe7e // indirect
github.com/longhorn/sparse-tools v0.0.0-20241124090628-9499c1fb63d7 // indirect
github.com/longhorn/go-iscsi-helper v0.0.0-20250511111123-ceffe5d4a273 // indirect
github.com/longhorn/sparse-tools v0.0.0-20241216160947-2b328f0fa59c // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mschoch/smat v0.2.0 // indirect
@ -82,22 +81,23 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc // indirect
golang.org/x/oauth2 v0.28.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/time v0.9.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.31.3 // indirect
k8s.io/apimachinery v0.32.0 // indirect
k8s.io/apimachinery v0.33.2 // indirect
k8s.io/client-go v0.31.3 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

go.sum

@ -12,11 +12,11 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4a
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ=
github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@ -27,8 +27,8 @@ github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -71,20 +71,22 @@ github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@ -106,22 +108,22 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/longhorn/backupstore v0.0.0-20241218004307-b75ee1322085 h1:rPAC3JxZlC7Y9ooQmLVZpwNX5JWy30oYqhSIoN7VPBg=
github.com/longhorn/backupstore v0.0.0-20241218004307-b75ee1322085/go.mod h1:SG0ydhcIYJxnBDBd/nm5Oy5rXX86wbRNDbEdywwiMaI=
github.com/longhorn/go-common-libs v0.0.0-20241218045609-c76c9e237a7d h1:dpx9Vm/PjJmTn0vq/nmP+gs3DWDikN7XRZFRg815P5M=
github.com/longhorn/go-common-libs v0.0.0-20241218045609-c76c9e237a7d/go.mod h1:/9MN0k4uIN5vskZeZ4xJ+maMiEAPgcq5tr6KvXETELM=
github.com/longhorn/go-iscsi-helper v0.0.0-20241208120713-c4ac270fbe7e h1:fX1GSLGgQdzBzzwwK1NLvO3dpSj9ce5e3kT9tz8yVsU=
github.com/longhorn/go-iscsi-helper v0.0.0-20241208120713-c4ac270fbe7e/go.mod h1:v+xVeM1Ea065+NHRihSnYjd3Az3D9+XEaEcVgFT8F9E=
github.com/longhorn/go-spdk-helper v0.0.0-20241218003628-6c9c87b76aa0 h1:D30inQQY3gnYL0CXgBSE9jG4aGtNKc72sos2FBDmc0w=
github.com/longhorn/go-spdk-helper v0.0.0-20241218003628-6c9c87b76aa0/go.mod h1:VqNHZ+5LEQzYObYmjxOIQHANUkC14JU+EZYa9DvLNpk=
github.com/longhorn/longhorn-engine v1.8.0-dev-20241215 h1:sTQn2kHZqgwycM9Y3n3fuiSaURxeIS4h98ACJbbJDl8=
github.com/longhorn/longhorn-engine v1.8.0-dev-20241215/go.mod h1:fcQI1dyu1KcQDvxrnHJ3rsSSauCKV1+IzaCLTbtdVY0=
github.com/longhorn/longhorn-spdk-engine v0.0.0-20241219055252-d82cf2ad63bc h1:CSWBCkQAi5Wj1KOwYqzl8M/CofGiPopq6OHAcAfXWv0=
github.com/longhorn/longhorn-spdk-engine v0.0.0-20241219055252-d82cf2ad63bc/go.mod h1:efzTtcWbhMntY785ru8ipccdz9WVitudtUko72apegc=
github.com/longhorn/sparse-tools v0.0.0-20241124090628-9499c1fb63d7 h1:jrW659ETMgyy8kmGAjG03DnaUWSxpFwnqDHlWe0kIUk=
github.com/longhorn/sparse-tools v0.0.0-20241124090628-9499c1fb63d7/go.mod h1:dfbJqfI8+T9ZCp5zhTYcBi/64hPBNt5/vFF3gTlfMmc=
github.com/longhorn/types v0.0.0-20241217083824-2e0ecb487ccf h1:7TFVWpGl0MB9osrliT+j6+Jdr1cAuR/U2noPx0H9yYY=
github.com/longhorn/types v0.0.0-20241217083824-2e0ecb487ccf/go.mod h1:3jHuVDtpkXQzpnp4prguDBskVRric2kmF8aSPkRJ4jw=
github.com/longhorn/backupstore v0.0.0-20250624115502-f6e828377c27 h1:Kf9L7JbsTqfUG+WrwahxfLO28xwrWmN/43Ukl143ns4=
github.com/longhorn/backupstore v0.0.0-20250624115502-f6e828377c27/go.mod h1:+sMmPD1PWsFGhHiytIYODa3vfDuXFdJsw9eNxOST9I0=
github.com/longhorn/go-common-libs v0.0.0-20250712065607-11215ac4de96 h1:+SN5T/B6WvJjlzKWDqswziq9k11XOxK27KlCTrbalW0=
github.com/longhorn/go-common-libs v0.0.0-20250712065607-11215ac4de96/go.mod h1:WJowu2xRMEZ2B9K+SPQCUQpFoiC6yZiAHLZx2cR34QE=
github.com/longhorn/go-iscsi-helper v0.0.0-20250511111123-ceffe5d4a273 h1:JbAv8suyObcgZFo/m+LrnsnWvzivuf6CKZWooLnLYAs=
github.com/longhorn/go-iscsi-helper v0.0.0-20250511111123-ceffe5d4a273/go.mod h1:ZpK2Pb1S93FW5PXgds8HlRy3Kr3I905dNaocHzAxxCM=
github.com/longhorn/go-spdk-helper v0.0.3-0.20250712161648-42d38592f838 h1:j3PgIRQihtn7R94mnVyTvc8YStYKH7JumiEI2yP4YQI=
github.com/longhorn/go-spdk-helper v0.0.3-0.20250712161648-42d38592f838/go.mod h1:QOzb0AX/CUcVtf5h6DMyr0W/7EkQrjKn2N593t35UWs=
github.com/longhorn/longhorn-engine v1.9.0 h1:NSQNwAFQZdF+nywTJbyW0qJBKIWPE6J2Vy0qWCsup4I=
github.com/longhorn/longhorn-engine v1.9.0/go.mod h1:dvVqPWauvH2hrMlI6sDd3y8wsA4wsAtCX7WB+JwcbwA=
github.com/longhorn/longhorn-spdk-engine v0.0.0-20250702033138-07866abda0ed h1:G4hY2NNJ4lsBqB8Oo8ADKO7t5fMycbLDnwT5r/ZvuTA=
github.com/longhorn/longhorn-spdk-engine v0.0.0-20250702033138-07866abda0ed/go.mod h1:iWEUN82bM1aEopJLZ/dhSvJSpq5Kqmq9Lrye7vpVeRs=
github.com/longhorn/sparse-tools v0.0.0-20241216160947-2b328f0fa59c h1:OFz3haCSPdgiiJvXLBeId/4dPu0dxIEqkQkfNMufLwc=
github.com/longhorn/sparse-tools v0.0.0-20241216160947-2b328f0fa59c/go.mod h1:dfbJqfI8+T9ZCp5zhTYcBi/64hPBNt5/vFF3gTlfMmc=
github.com/longhorn/types v0.0.0-20250710112743-e3a1e9e2a9c1 h1:Lox/NlebN9jOc9JXokB270iyeMlyUw9gRePBy5LKwz0=
github.com/longhorn/types v0.0.0-20250710112743-e3a1e9e2a9c1/go.mod h1:3bhH8iUZGZT3kA/B1DYMGzpdzfacqeexOt4SHo4/C2I=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@ -129,8 +131,6 @@ github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -197,27 +197,28 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ=
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
github.com/urfave/cli v1.22.17 h1:SYzXoiPfQjHBbkYxbew5prZHS1TOLT3ierW8SYLqtVQ=
github.com/urfave/cli v1.22.17/go.mod h1:b0ht0aqgH/6pBYzzxURyrM4xXNgsoT/n2ZzwQiEhNVo=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@ -225,10 +226,10 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4=
golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc h1:TS73t7x3KarrNd5qAipmspBDS1rkMcgVG/fS1aRb4Rc=
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -236,16 +237,16 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -253,32 +254,32 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 h1:LWZqQOEjDyONlF1H6afSWpAL/znlREo2tHfLoe+8LMA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@ -294,21 +295,24 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY=
k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/mount-utils v0.32.0 h1:KOQAhPzJICATXnc6XCkWoexKbkOexRnMCUW8APFfwg4=
k8s.io/mount-utils v0.32.0/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/mount-utils v0.33.2 h1:mZAFhoGs/MwJziVlUpA072vqMhXRc0LGl/W3wybLP20=
k8s.io/mount-utils v0.33.2/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

View File

@ -1,13 +1,17 @@
# Stage 1: build binary from go source code
FROM registry.suse.com/bci/golang:1.23 AS gobuilder
FROM registry.suse.com/bci/golang:1.24 AS gobuilder
ARG ARCH=amd64
ARG SRC_BRANCH=master
ARG SRC_TAG
RUN zypper -n ref && \
zypper update -y
RUN zypper -n addrepo --refresh https://download.opensuse.org/repositories/network:utilities/SLE_15_SP5/network:utilities.repo && \
zypper --gpg-auto-import-keys ref
RUN for i in {1..10}; do \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/network:utilities/SLE_15/network:utilities.repo && \
zypper --gpg-auto-import-keys ref && break || sleep 1; \
done
RUN zypper -n install wget jq
@ -15,119 +19,94 @@ ENV GOLANG_ARCH_amd64=amd64 GOLANG_ARCH_arm64=arm64 GOLANG_ARCH_s390x=s390x GOLA
GOPATH=/go PATH=/go/bin:/usr/local/go/bin:${PATH} SHELL=/bin/bash
RUN go install golang.org/x/lint/golint@latest
# If TAG is explicitly set and exists in the repo, switch to the tag
RUN git clone https://github.com/longhorn/dep-versions.git -b ${SRC_BRANCH} /usr/src/dep-versions && \
cd /usr/src/dep-versions && \
if [ -n "${SRC_TAG}" ] && git show-ref --tags ${SRC_TAG} > /dev/null 2>&1; then \
echo "Checking out tag ${SRC_TAG}"; \
cd /usr/src/dep-versions && git checkout tags/${SRC_TAG}; \
fi
# Build go-spdk-helper
ENV GO_SPDK_HELPER_DIR /usr/src/go-spdk-helper
ENV GO_SPDK_HELPER_COMMIT_ID 6a324e95979662be592bfda0e867d2678ecbc756
RUN git clone https://github.com/longhorn/go-spdk-helper.git ${GO_SPDK_HELPER_DIR} && \
cd ${GO_SPDK_HELPER_DIR} && \
git checkout ${GO_SPDK_HELPER_COMMIT_ID} && \
go build && \
install -m 755 go-spdk-helper /usr/local/bin/go-spdk-helper && \
rm -rf ${GO_SPDK_HELPER_DIR}
RUN export REPO_OVERRIDE="" && \
export COMMIT_ID_OVERRIDE="" && \
bash /usr/src/dep-versions/scripts/build-go-spdk-helper.sh "${REPO_OVERRIDE}" "${COMMIT_ID_OVERRIDE}"
# Install grpc_health_probe
RUN GRPC_HEALTH_PROBE_DOWNLOAD_URL=$(wget -qO- https://api.github.com/repos/grpc-ecosystem/grpc-health-probe/releases/latest | jq -r '.assets[] | select(.name | test("linux.*'"${ARCH}"'"; "i")) | .browser_download_url') && \
RUN export GRPC_HEALTH_PROBE_DOWNLOAD_URL=$(wget -qO- https://api.github.com/repos/grpc-ecosystem/grpc-health-probe/releases/latest | jq -r '.assets[] | select(.name | test("linux.*'"${ARCH}"'"; "i")) | .browser_download_url') && \
wget ${GRPC_HEALTH_PROBE_DOWNLOAD_URL} -O /usr/local/bin/grpc_health_probe && \
chmod +x /usr/local/bin/grpc_health_probe
# Stage 2: build binary from c source code
FROM registry.suse.com/bci/bci-base:15.6 AS cbuilder
FROM registry.suse.com/bci/bci-base:15.7 AS cbuilder
ARG ARCH=amd64
ARG SRC_BRANCH=master
ARG SRC_TAG
ENV LIBLONGHORN_COMMIT_ID 53d1c063b95efc8d949b095bd4bf04637230265f
ENV TGT_COMMIT_ID 3a8bc4823b5390e046f7aa8231ed262c0365c42c
ENV SPDK_COMMIT_ID 10463b5c13b454e173707628b9eac608081f5082
ENV LIBJSONC_COMMIT_ID b4c371fa0cbc4dcbaccc359ce9e957a22988fb34
# nvme-cli 2.10.2
ENV NVME_CLI_COMMIT_ID eeaa08c9a0e9184f3889df0bff3d2a23db6d6294
RUN zypper -n ref && \
zypper update -y
RUN zypper -n addrepo --refresh https://download.opensuse.org/repositories/system:/snappy/SLE_15/system:snappy.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/network:/utilities/SLE_15/network:utilities.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:libraries:c_c++/15.6/devel:libraries:c_c++.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:languages:python:Factory/15.6/devel:languages:python:Factory.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:languages:python:backports/SLE_15/devel:languages:python:backports.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:tools:building/15.6/devel:tools:building.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/filesystems/15.6/filesystems.repo && \
zypper --gpg-auto-import-keys ref
RUN for i in {1..10}; do \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/system:/snappy/SLE_15/system:snappy.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/network:/utilities/SLE_15/network:utilities.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:libraries:c_c++/15.6/devel:libraries:c_c++.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:languages:python:Factory/15.6/devel:languages:python:Factory.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:tools:building/15.6/devel:tools:building.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/filesystems/15.6/filesystems.repo && \
zypper --gpg-auto-import-keys ref && break || sleep 1; \
done
RUN zypper -n install cmake gcc xsltproc docbook-xsl-stylesheets git python311 python311-pip patchelf fuse3-devel
# Build liblonghorn
RUN cd /usr/src && \
git clone https://github.com/rancher/liblonghorn.git && \
cd liblonghorn && \
git checkout ${LIBLONGHORN_COMMIT_ID} && \
make; \
make install
RUN zypper -n install cmake gcc gcc13 xsltproc docbook-xsl-stylesheets git python311 python311-pip patchelf fuse3-devel jq wget
# Build TGT
RUN cd /usr/src && \
git clone https://github.com/rancher/tgt.git && \
cd tgt && \
git checkout ${TGT_COMMIT_ID} && \
make; \
make install
# Build spdk
ENV SPDK_DIR /usr/src/spdk
RUN git clone https://github.com/longhorn/spdk.git ${SPDK_DIR} --recursive && \
cd ${SPDK_DIR} && \
git checkout ${SPDK_COMMIT_ID} && \
git submodule update --init && \
sed -i '/python3-pyelftools/d' ./scripts/pkgdep/sles.sh && \
sed -i 's/python3-/python311-/g' ./scripts/pkgdep/sles.sh && \
./scripts/pkgdep.sh && \
pip3 install -r ./scripts/pkgdep/requirements.txt && \
if [ ${ARCH} = "amd64" ]; then \
./configure --target-arch=nehalem --disable-tests --disable-unit-tests --disable-examples && \
make -j$(nproc) && \
make install; \
elif [ ${ARCH} = "arm64" ]; then \
./configure --target-arch=native --disable-tests --disable-unit-tests --disable-examples && \
DPDKBUILD_FLAGS="-Dplatform=generic" make -j$(nproc) && \
make install; \
else \
echo "Unsupported architecture: ${ARCH}"; \
exit 1; \
RUN git clone https://github.com/longhorn/dep-versions.git -b ${SRC_BRANCH} /usr/src/dep-versions && \
cd /usr/src/dep-versions && \
if [ -n "${SRC_TAG}" ] && git show-ref --tags ${SRC_TAG} > /dev/null 2>&1; then \
echo "Checking out tag ${SRC_TAG}"; \
cd /usr/src/dep-versions && git checkout tags/${SRC_TAG}; \
fi
# Build liblonghorn
RUN export REPO_OVERRIDE="" && \
export COMMIT_ID_OVERRIDE="" && \
bash /usr/src/dep-versions/scripts/build-liblonghorn.sh "${REPO_OVERRIDE}" "${COMMIT_ID_OVERRIDE}"
# Build TGT
RUN export REPO_OVERRIDE="" && \
export COMMIT_ID_OVERRIDE="" && \
bash /usr/src/dep-versions/scripts/build-tgt.sh "${REPO_OVERRIDE}" "${COMMIT_ID_OVERRIDE}"
# Build spdk
RUN export REPO_OVERRIDE="" && \
export COMMIT_ID_OVERRIDE="" && \
bash /usr/src/dep-versions/scripts/build-spdk.sh "${REPO_OVERRIDE}" "${COMMIT_ID_OVERRIDE}" "${ARCH}"
# Build libjson-c-devel
RUN cd /usr/src && \
git clone https://github.com/json-c/json-c.git && \
cd json-c && \
git checkout ${LIBJSONC_COMMIT_ID} && \
mkdir .build && \
cd .build && \
cmake ../ && \
make && \
make install
RUN export REPO_OVERRIDE="" && \
export COMMIT_ID_OVERRIDE="" && \
bash /usr/src/dep-versions/scripts/build-libjsonc.sh "${REPO_OVERRIDE}" "${COMMIT_ID_OVERRIDE}"
# Build nvme-cli
ENV NVME_CLI_DIR /usr/src/nvme-cli
RUN git clone https://github.com/linux-nvme/nvme-cli.git ${NVME_CLI_DIR} && \
cd ${NVME_CLI_DIR} && \
git checkout ${NVME_CLI_COMMIT_ID} && \
meson setup --force-fallback-for=libnvme .build && \
meson compile -C .build && \
meson install -C .build
RUN export REPO_OVERRIDE="" && \
export COMMIT_ID_OVERRIDE="" && \
bash /usr/src/dep-versions/scripts/build-nvme-cli.sh "${REPO_OVERRIDE}" "${COMMIT_ID_OVERRIDE}"
# Stage 3: copy binaries to release image
FROM registry.suse.com/bci/bci-base:15.6 AS release
FROM registry.suse.com/bci/bci-base:15.7 AS release
ARG ARCH=amd64
RUN zypper -n ref && \
zypper update -y
RUN zypper -n addrepo --refresh https://download.opensuse.org/repositories/system:/snappy/SLE_15/system:snappy.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/network:/utilities/SLE_15/network:utilities.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:libraries:c_c++/15.6/devel:libraries:c_c++.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:languages:python:Factory/15.6/devel:languages:python:Factory.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:languages:python:backports/SLE_15/devel:languages:python:backports.repo && \
zypper --gpg-auto-import-keys ref
RUN for i in {1..10}; do \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/system:/snappy/SLE_15/system:snappy.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/network:/utilities/SLE_15/network:utilities.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:libraries:c_c++/15.6/devel:libraries:c_c++.repo && \
zypper -n addrepo --refresh https://download.opensuse.org/repositories/devel:languages:python:Factory/15.6/devel:languages:python:Factory.repo && \
zypper --gpg-auto-import-keys ref && break || sleep 1; \
done
RUN zypper -n install nfs-client nfs4-acl-tools cifs-utils sg3_utils \
iproute2 qemu-tools e2fsprogs xfsprogs util-linux-systemd python311-base libcmocka-devel device-mapper netcat kmod jq util-linux procps fuse3-devel awk && \
@ -136,7 +115,9 @@ RUN zypper -n install nfs-client nfs4-acl-tools cifs-utils sg3_utils \
# Install SPDK dependencies
COPY --from=cbuilder /usr/src/spdk/scripts /usr/src/spdk/scripts
COPY --from=cbuilder /usr/src/spdk/include /usr/src/spdk/include
RUN bash /usr/src/spdk/scripts/pkgdep.sh
RUN for i in {1..10}; do \
bash /usr/src/spdk/scripts/pkgdep.sh && break || sleep 1; \
done
# Copy pre-built binaries from cbuilder and gobuilder
COPY --from=gobuilder \
@ -164,9 +145,13 @@ COPY --from=cbuilder \
/usr/local/lib64 \
/usr/local/lib64
COPY --from=cbuilder \
/usr/lib64 \
/usr/lib64
RUN ldconfig
COPY package/bin/longhorn-instance-manager /usr/local/bin/
COPY bin/longhorn-instance-manager /usr/local/bin/
COPY package/instance-manager /usr/local/bin/
COPY package/instance-manager-v2-prestop /usr/local/bin/

View File

@ -25,3 +25,12 @@ type ReplicaStorageInstance struct {
SpecSize uint64
ActualSize uint64
}
type DiskMetrics struct {
ReadThroughput uint64
WriteThroughput uint64
ReadLatency uint64
WriteLatency uint64
ReadIOPS uint64
WriteIOPS uint64
}
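The DiskMetrics struct above carries the per-disk counters returned by the new disk metrics API. As a minimal illustration only (the helper name is made up, it is assumed to sit in the same api package with "fmt" imported, and the units of the counters are not specified in this changeset), one sample could be formatted like this:
// Illustrative sketch, not part of the changeset.
func summarizeDiskMetrics(m *DiskMetrics) string {
	return fmt.Sprintf("read: %d IOPS, %d throughput, %d latency; write: %d IOPS, %d throughput, %d latency",
		m.ReadIOPS, m.ReadThroughput, m.ReadLatency,
		m.WriteIOPS, m.WriteThroughput, m.WriteLatency)
}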

View File

@ -73,6 +73,8 @@ type InstanceStatus struct {
TargetPortEnd int32 `json:"targetPortEnd"`
StandbyTargetPortStart int32 `json:"standbyTargetPortStart"`
StandbyTargetPortEnd int32 `json:"standbyTargetPortEnd"`
UblkID int32 `json:"ublk_id"`
UUID string `json:"uuid"`
}
func RPCToInstanceStatus(obj *rpc.InstanceStatus) InstanceStatus {
@ -86,6 +88,8 @@ func RPCToInstanceStatus(obj *rpc.InstanceStatus) InstanceStatus {
TargetPortEnd: obj.TargetPortEnd,
StandbyTargetPortStart: obj.StandbyTargetPortStart,
StandbyTargetPortEnd: obj.StandbyTargetPortEnd,
UblkID: obj.UblkId,
UUID: obj.Uuid,
}
}

View File

@ -41,6 +41,7 @@ type ProcessStatus struct {
Conditions map[string]bool `json:"conditions"`
PortStart int32 `json:"portStart"`
PortEnd int32 `json:"portEnd"`
UUID string `json:"uuid"`
}
func RPCToProcessStatus(obj *rpc.ProcessStatus) ProcessStatus {
@ -50,6 +51,7 @@ func RPCToProcessStatus(obj *rpc.ProcessStatus) ProcessStatus {
Conditions: obj.Conditions,
PortStart: obj.PortStart,
PortEnd: obj.PortEnd,
UUID: obj.Uuid,
}
}

View File

@ -5,12 +5,14 @@ import (
"crypto/tls"
"fmt"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/pkg/errors"
"google.golang.org/grpc"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/protobuf/types/known/emptypb"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/longhorn/longhorn-instance-manager/pkg/api"
"github.com/longhorn/longhorn-instance-manager/pkg/meta"
"github.com/longhorn/longhorn-instance-manager/pkg/types"
@ -268,3 +270,39 @@ func (c *DiskServiceClient) CheckConnection() error {
_, err := c.health.Check(getContextWithGRPCTimeout(c.ctx), req)
return err
}
// MetricsGet returns the disk metrics with the given name and path.
func (c *DiskServiceClient) MetricsGet(diskType, diskName, diskPath, diskDriver string) (*api.DiskMetrics, error) {
if diskName == "" {
return nil, fmt.Errorf("failed to get disk metrics: missing required parameter diskName")
}
t, ok := rpc.DiskType_value[diskType]
if !ok {
return nil, fmt.Errorf("failed to get disk metrics: invalid disk type %v", diskType)
}
client := c.getDiskServiceClient()
ctx, cancel := context.WithTimeout(context.Background(), types.GRPCServiceTimeout)
defer cancel()
resp, err := client.MetricsGet(ctx, &rpc.DiskGetRequest{
DiskType: rpc.DiskType(t),
DiskName: diskName,
DiskPath: diskPath,
DiskDriver: diskDriver,
})
if err != nil {
return nil, err
}
// Convert to api.DiskMetrics format
return &api.DiskMetrics{
ReadThroughput: resp.Metrics.ReadThroughput,
WriteThroughput: resp.Metrics.WriteThroughput,
ReadLatency: resp.Metrics.ReadLatency,
WriteLatency: resp.Metrics.WriteLatency,
ReadIOPS: resp.Metrics.ReadIOPS,
WriteIOPS: resp.Metrics.WriteIOPS,
}, nil
}
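A hedged usage sketch for the client method above: it assumes an already-connected *DiskServiceClient named c, that "block" is a valid key of rpc.DiskType_value, and placeholder disk name, path, and driver values.
// Sketch only; c is an existing *DiskServiceClient, all values are placeholders.
metrics, err := c.MetricsGet("block", "disk-example", "/dev/nvme0n1", "")
if err != nil {
	logrus.WithError(err).Warn("Failed to get disk metrics for disk-example")
	return
}
logrus.Infof("disk-example read IOPS: %v, write IOPS: %v", metrics.ReadIOPS, metrics.WriteIOPS)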

View File

@ -5,12 +5,14 @@ import (
"crypto/tls"
"fmt"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/pkg/errors"
"google.golang.org/grpc"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/protobuf/types/known/emptypb"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/longhorn/longhorn-instance-manager/pkg/api"
"github.com/longhorn/longhorn-instance-manager/pkg/meta"
"github.com/longhorn/longhorn-instance-manager/pkg/types"
@ -190,8 +192,8 @@ func (c *InstanceServiceClient) InstanceCreate(req *InstanceCreateRequest) (*api
return api.RPCToInstance(p), nil
}
// InstanceDelete deletes the instance by name.
func (c *InstanceServiceClient) InstanceDelete(dataEngine, name, instanceType, diskUUID string, cleanupRequired bool) (*api.Instance, error) {
// InstanceDelete deletes the instance by name. UUID will be validated if not empty.
func (c *InstanceServiceClient) InstanceDelete(dataEngine, name, uuid, instanceType, diskUUID string, cleanupRequired bool) (*api.Instance, error) {
if name == "" {
return nil, fmt.Errorf("failed to delete instance: missing required parameter name")
}
@ -207,6 +209,7 @@ func (c *InstanceServiceClient) InstanceDelete(dataEngine, name, instanceType, d
p, err := client.InstanceDelete(ctx, &rpc.InstanceDeleteRequest{
Name: name,
Uuid: uuid,
Type: instanceType,
// nolint:all replaced with DataEngine
BackendStoreDriver: rpc.BackendStoreDriver(driver),

View File

@ -5,13 +5,15 @@ import (
"crypto/tls"
"fmt"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/protobuf/types/known/emptypb"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/longhorn/longhorn-instance-manager/pkg/api"
"github.com/longhorn/longhorn-instance-manager/pkg/meta"
"github.com/longhorn/longhorn-instance-manager/pkg/types"
@ -114,7 +116,7 @@ func (c *ProcessManagerClient) ProcessCreate(name, binary string, portCount int,
})
}
func (c *ProcessManagerClient) ProcessDelete(name string) (*rpc.ProcessResponse, error) {
func (c *ProcessManagerClient) ProcessDelete(name, uuid string) (*rpc.ProcessResponse, error) {
if name == "" {
return nil, fmt.Errorf("failed to delete process: missing required parameter name")
}
@ -125,6 +127,7 @@ func (c *ProcessManagerClient) ProcessDelete(name string) (*rpc.ProcessResponse,
return client.ProcessDelete(ctx, &rpc.ProcessDeleteRequest{
Name: name,
Uuid: uuid,
})
}
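A brief usage sketch of the widened signature, assuming an existing *ProcessManagerClient named pmClient; the process name and UUID below are placeholders. An empty uuid keeps the previous delete-by-name behavior, while a non-empty uuid is validated by the process manager (see the server-side ProcessDelete change later in this diff).
// Sketch only; pmClient is an already-connected *ProcessManagerClient.
resp, err := pmClient.ProcessDelete("pvc-example-e-0", "3c9f2c9e-0000-4e7b-9a6f-placeholder")
if err != nil {
	logrus.WithError(err).Warn("Failed to delete process")
	return
}
logrus.Infof("Process deletion requested, deleted=%v", resp.Deleted)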

View File

@ -5,16 +5,18 @@ import (
"fmt"
"time"
emeta "github.com/longhorn/longhorn-engine/pkg/meta"
eclient "github.com/longhorn/longhorn-engine/pkg/replica/client"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
emeta "github.com/longhorn/longhorn-engine/pkg/meta"
eclient "github.com/longhorn/longhorn-engine/pkg/replica/client"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/longhorn/longhorn-instance-manager/pkg/meta"
"github.com/longhorn/longhorn-instance-manager/pkg/util"
)

View File

@ -4,9 +4,10 @@ import (
"encoding/json"
"fmt"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/pkg/errors"
"google.golang.org/protobuf/types/known/emptypb"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
)
func (c *ProxyClient) CleanupBackupMountPoints() (err error) {

View File

@ -1,21 +1,29 @@
package client
import (
"fmt"
"github.com/pkg/errors"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
)
func (c *ProxyClient) MetricsGet(engineName, volumeName, serviceAddress string) (metrics *Metrics, err error) {
func (c *ProxyClient) MetricsGet(dataEngine, engineName, volumeName, serviceAddress string) (metrics *Metrics, err error) {
input := map[string]string{
"engineName": engineName,
"volumeName": volumeName,
"serviceAddress": serviceAddress,
"dataEngine": dataEngine,
}
if err := validateProxyMethodParameters(input); err != nil {
return nil, errors.Wrap(err, "failed to get metrics for volume")
}
driver, ok := rpc.DataEngine_value[getDataEngine(dataEngine)]
if !ok {
return nil, fmt.Errorf("failed to get metrics for volume: invalid data engine %v", dataEngine)
}
defer func() {
err = errors.Wrapf(err, "%v failed to get metrics for volume", c.getProxyErrorPrefix(serviceAddress))
}()
@ -24,6 +32,7 @@ func (c *ProxyClient) MetricsGet(engineName, volumeName, serviceAddress string)
Address: serviceAddress,
EngineName: engineName,
VolumeName: volumeName,
DataEngine: rpc.DataEngine(driver),
}
resp, err := c.service.MetricsGet(getContextWithGRPCTimeout(c.ctx), req)
if err != nil {

View File

@ -3,9 +3,10 @@ package client
import (
"fmt"
"github.com/pkg/errors"
etypes "github.com/longhorn/longhorn-engine/pkg/types"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/pkg/errors"
)
func (c *ProxyClient) ReplicaAdd(dataEngine, engineName, volumeName, serviceAddress, replicaName,
@ -158,6 +159,41 @@ func (c *ProxyClient) ReplicaRebuildingStatus(dataEngine, engineName, volumeName
return status, nil
}
func (c *ProxyClient) ReplicaRebuildingQosSet(dataEngine, engineName, volumeName,
serviceAddress string, qosLimitMbps int64) (err error) {
input := map[string]string{
"engineName": engineName,
"volumeName": volumeName,
"serviceAddress": serviceAddress,
}
if err := validateProxyMethodParameters(input); err != nil {
return errors.Wrap(err, "failed to set replicas rebuilding qos set")
}
driver, ok := rpc.DataEngine_value[getDataEngine(dataEngine)]
if !ok {
return fmt.Errorf("failed to set replicas rebuilding qos set: invalid data engine %v", dataEngine)
}
defer func() {
err = errors.Wrapf(err, "%v failed to set replicas rebuilding qos set", c.getProxyErrorPrefix(serviceAddress))
}()
req := &rpc.EngineReplicaRebuildingQosSetRequest{
ProxyEngineRequest: &rpc.ProxyEngineRequest{
Address: serviceAddress,
EngineName: engineName,
// nolint:all replaced with DataEngine
BackendStoreDriver: rpc.BackendStoreDriver(driver),
DataEngine: rpc.DataEngine(driver),
VolumeName: volumeName,
},
QosLimitMbps: qosLimitMbps,
}
_, err = c.service.ReplicaRebuildingQosSet(getContextWithGRPCTimeout(c.ctx), req)
return err
}
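A usage sketch for the new proxy call above, assuming an already-connected *ProxyClient named proxyClient, that "v2" stands in for whatever data-engine identifier the caller already uses with this client, and placeholder engine, volume, address, and limit values.
// Sketch only; the limit of 100 caps rebuilding traffic at 100 MB/s.
if err := proxyClient.ReplicaRebuildingQosSet("v2", "volume-1-e-0", "volume-1",
	"10.42.0.15:10010", 100); err != nil {
	logrus.WithError(err).Warn("Failed to set replica rebuilding QoS limit")
}
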
func (c *ProxyClient) ReplicaVerifyRebuild(dataEngine, engineName, volumeName, serviceAddress,
replicaAddress, replicaName string) (err error) {
input := map[string]string{

View File

@ -67,11 +67,12 @@ type EngineBackupInfo struct {
}
type ReplicaRebuildStatus struct {
Error string
IsRebuilding bool
Progress int
State string
FromReplicaAddress string
Error string
IsRebuilding bool
Progress int
State string
FromReplicaAddress string
AppliedRebuildingMBps int64
}
type SnapshotHashStatus struct {

View File

@ -8,6 +8,7 @@ import (
"github.com/longhorn/longhorn-spdk-engine/pkg/api"
spdkclient "github.com/longhorn/longhorn-spdk-engine/pkg/client"
enginerpc "github.com/longhorn/types/pkg/generated/enginerpc"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
spdkrpc "github.com/longhorn/types/pkg/generated/spdkrpc"
"github.com/pkg/errors"
@ -31,6 +32,7 @@ type DiskOps interface {
DiskGet(req *rpc.DiskGetRequest) (*rpc.Disk, error)
DiskReplicaInstanceList(*rpc.DiskReplicaInstanceListRequest) (*rpc.DiskReplicaInstanceListResponse, error)
DiskReplicaInstanceDelete(*rpc.DiskReplicaInstanceDeleteRequest) (*emptypb.Empty, error)
MetricsGet(*rpc.DiskGetRequest) (*rpc.DiskMetricsGetReply, error)
}
type FilesystemDiskOps struct{}
@ -267,6 +269,49 @@ func (ops BlockDiskOps) DiskReplicaInstanceDelete(req *rpc.DiskReplicaInstanceDe
return &emptypb.Empty{}, nil
}
func (s *Server) MetricsGet(ctx context.Context, req *rpc.DiskGetRequest) (*rpc.DiskMetricsGetReply, error) {
log := logrus.WithFields(logrus.Fields{
"diskType": req.DiskType,
"diskName": req.DiskName,
"diskPath": req.DiskPath,
})
log.Trace("Disk Server: Getting disk metrics")
if req.DiskName == "" {
return nil, grpcstatus.Error(grpccodes.InvalidArgument, "disk name is required")
}
ops, ok := s.ops[req.DiskType]
if !ok {
return nil, grpcstatus.Errorf(grpccodes.Unimplemented, "unsupported disk type %v", req.DiskType)
}
return ops.MetricsGet(req)
}
func (ops FilesystemDiskOps) MetricsGet(req *rpc.DiskGetRequest) (*rpc.DiskMetricsGetReply, error) {
return nil, grpcstatus.Errorf(grpccodes.Unimplemented, "disk metrics are not supported for disk type %v", req.DiskType)
}
func (ops BlockDiskOps) MetricsGet(req *rpc.DiskGetRequest) (*rpc.DiskMetricsGetReply, error) {
metrics, err := ops.spdkClient.MetricsGet(req.DiskName)
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, err.Error())
}
// Convert SPDK metrics to enginerpc.Metrics format
return &rpc.DiskMetricsGetReply{
Metrics: &enginerpc.Metrics{
ReadThroughput: metrics.ReadThroughput,
WriteThroughput: metrics.WriteThroughput,
ReadLatency: metrics.ReadLatency,
WriteLatency: metrics.WriteLatency,
ReadIOPS: metrics.ReadIOPS,
WriteIOPS: metrics.WriteIOPS,
},
}, nil
}
func spdkDiskToDisk(disk *spdkrpc.Disk) *rpc.Disk {
return &rpc.Disk{
Id: disk.Id,

View File

@ -55,3 +55,13 @@ func (hc *CheckDiskServer) Watch(req *healthpb.HealthCheckRequest, ws healthpb.H
time.Sleep(time.Second)
}
}
func (hc *CheckDiskServer) List(context.Context, *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) {
return &healthpb.HealthListResponse{
Statuses: map[string]*healthpb.HealthCheckResponse{
"grpc": {
Status: healthpb.HealthCheckResponse_SERVING,
},
},
}, nil
}

View File

@ -30,7 +30,7 @@ func (hc *CheckServer) Check(context.Context, *healthpb.HealthCheckRequest) (*he
return &healthpb.HealthCheckResponse{
Status: healthpb.HealthCheckResponse_NOT_SERVING,
}, fmt.Errorf("Engine Manager or Process Manager or Instance Manager is not running")
}, fmt.Errorf("engine Manager or Process Manager or Instance Manager is not running")
}
func (hc *CheckServer) Watch(req *healthpb.HealthCheckRequest, ws healthpb.Health_WatchServer) error {
@ -53,3 +53,13 @@ func (hc *CheckServer) Watch(req *healthpb.HealthCheckRequest, ws healthpb.Healt
time.Sleep(time.Second)
}
}
func (hc *CheckServer) List(context.Context, *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) {
return &healthpb.HealthListResponse{
Statuses: map[string]*healthpb.HealthCheckResponse{
"grpc": {
Status: healthpb.HealthCheckResponse_SERVING,
},
},
}, nil
}

View File

@ -55,3 +55,13 @@ func (hc *CheckInstanceServer) Watch(req *healthpb.HealthCheckRequest, ws health
time.Sleep(time.Second)
}
}
func (hc *CheckInstanceServer) List(context.Context, *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) {
return &healthpb.HealthListResponse{
Statuses: map[string]*healthpb.HealthCheckResponse{
"grpc": {
Status: healthpb.HealthCheckResponse_SERVING,
},
},
}, nil
}

View File

@ -55,3 +55,13 @@ func (hc *CheckProxyServer) Watch(req *healthpb.HealthCheckRequest, ws healthpb.
time.Sleep(time.Second)
}
}
func (hc *CheckProxyServer) List(context.Context, *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) {
return &healthpb.HealthListResponse{
Statuses: map[string]*healthpb.HealthCheckResponse{
"grpc": {
Status: healthpb.HealthCheckResponse_SERVING,
},
},
}, nil
}

View File

@ -56,3 +56,13 @@ func (hc *CheckSPDKServer) Watch(req *healthpb.HealthCheckRequest, ws healthpb.H
time.Sleep(time.Second)
}
}
func (hc *CheckSPDKServer) List(context.Context, *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) {
return &healthpb.HealthListResponse{
Statuses: map[string]*healthpb.HealthCheckResponse{
"grpc": {
Status: healthpb.HealthCheckResponse_SERVING,
},
},
}, nil
}
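Each of the health servers above gains the same List handler alongside the existing Check and Watch. On the client side the call would look roughly like this, assuming an established *grpc.ClientConn named conn and a grpc-go version whose generated health client exposes List (implied by the HealthListRequest/HealthListResponse types used here):
// Sketch only; conn is a dialed *grpc.ClientConn to the instance manager.
healthClient := healthpb.NewHealthClient(conn)
resp, err := healthClient.List(context.Background(), &healthpb.HealthListRequest{})
if err != nil {
	logrus.WithError(err).Warn("Failed to list health statuses")
	return
}
for service, st := range resp.Statuses {
	logrus.Infof("service %q status: %v", service, st.Status)
}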

View File

@ -133,7 +133,17 @@ func (ops V1DataEngineInstanceOps) InstanceCreate(req *rpc.InstanceCreateRequest
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create ProcessManagerClient").Error())
}
defer pmClient.Close()
defer func() {
if closeErr := pmClient.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Spec.Name,
"type": req.Spec.Type,
"dataEngine": req.Spec.DataEngine,
"upgradeRequired": req.Spec.UpgradeRequired,
}).WithError(closeErr).Warn("Failed to close ProcessManager client")
}
}()
process, err := pmClient.ProcessCreate(req.Spec.Name, req.Spec.ProcessInstanceSpec.Binary, int(req.Spec.PortCount), req.Spec.ProcessInstanceSpec.Args, req.Spec.PortArgs)
if err != nil {
@ -147,7 +157,16 @@ func (ops V2DataEngineInstanceOps) InstanceCreate(req *rpc.InstanceCreateRequest
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Spec.Name,
"type": req.Spec.Type,
"dataEngine": req.Spec.DataEngine,
"upgradeRequired": req.Spec.UpgradeRequired,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
switch req.Spec.Type {
case types.InstanceTypeEngine:
@ -171,6 +190,7 @@ func (ops V2DataEngineInstanceOps) InstanceCreate(req *rpc.InstanceCreateRequest
func (s *Server) InstanceDelete(ctx context.Context, req *rpc.InstanceDeleteRequest) (*rpc.InstanceResponse, error) {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"uuid": req.Uuid,
"type": req.Type,
"dataEngine": req.DataEngine,
"diskUuid": req.DiskUuid,
@ -191,9 +211,20 @@ func (ops V1DataEngineInstanceOps) InstanceDelete(req *rpc.InstanceDeleteRequest
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create ProcessManagerClient").Error())
}
defer pmClient.Close()
defer func() {
if closeErr := pmClient.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"uuid": req.Uuid,
"type": req.Type,
"dataEngine": req.DataEngine,
"diskUuid": req.DiskUuid,
"cleanupRequired": req.CleanupRequired,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
process, err := pmClient.ProcessDelete(req.Name)
process, err := pmClient.ProcessDelete(req.Name, req.Uuid)
if err != nil {
return nil, err
}
@ -205,7 +236,22 @@ func (ops V2DataEngineInstanceOps) InstanceDelete(req *rpc.InstanceDeleteRequest
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"uuid": req.Uuid,
"type": req.Type,
"dataEngine": req.DataEngine,
"diskUuid": req.DiskUuid,
"cleanupRequired": req.CleanupRequired,
}).WithError(closeErr).Warn("Failed to close SPDK Client")
}
}()
if req.Uuid != "" {
logrus.Debugf("Deleting instance %v with UUID %v", req.Name, req.Uuid)
}
switch req.Type {
case types.InstanceTypeEngine:
@ -253,7 +299,15 @@ func (ops V1DataEngineInstanceOps) InstanceGet(req *rpc.InstanceGetRequest) (*rp
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create ProcessManagerClient").Error())
}
defer pmClient.Close()
defer func() {
if closeErr := pmClient.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"type": req.Type,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to ProcessManager client")
}
}()
process, err := pmClient.ProcessGet(req.Name)
if err != nil {
@ -267,7 +321,15 @@ func (ops V2DataEngineInstanceOps) InstanceGet(req *rpc.InstanceGetRequest) (*rp
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"type": req.Type,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
switch req.Type {
case types.InstanceTypeEngine:
@ -316,7 +378,11 @@ func (ops V1DataEngineInstanceOps) InstanceList(instances map[string]*rpc.Instan
if err != nil {
return grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create ProcessManagerClient").Error())
}
defer pmClient.Close()
defer func() {
if closeErr := pmClient.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close ProcessManager client")
}
}()
processes, err := pmClient.ProcessList()
if err != nil {
@ -337,7 +403,11 @@ func (ops V2DataEngineInstanceOps) InstanceList(instances map[string]*rpc.Instan
if err != nil {
return grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
replicas, err := c.ReplicaList()
if err != nil {
@ -382,7 +452,15 @@ func (ops V1DataEngineInstanceOps) InstanceReplace(req *rpc.InstanceReplaceReque
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create ProcessManagerClient").Error())
}
defer pmClient.Close()
defer func() {
if closeErr := pmClient.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Spec.Name,
"type": req.Spec.Type,
"dataEngine": req.Spec.DataEngine,
}).WithError(closeErr).Warn("Failed to close ProcessManager client")
}
}()
process, err := pmClient.ProcessReplace(req.Spec.Name,
req.Spec.ProcessInstanceSpec.Binary, int(req.Spec.PortCount), req.Spec.ProcessInstanceSpec.Args, req.Spec.PortArgs, req.TerminateSignal)
@ -418,7 +496,15 @@ func (ops V1DataEngineInstanceOps) InstanceLog(req *rpc.InstanceLogRequest, srv
if err != nil {
return grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create ProcessManagerClient").Error())
}
defer pmClient.Close()
defer func() {
if closeErr := pmClient.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"type": req.Type,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close ProcessManager client")
}
}()
stream, err := pmClient.ProcessLog(context.Background(), req.Name)
if err != nil {
@ -473,9 +559,13 @@ func (s *Server) InstanceWatch(req *emptypb.Empty, srv rpc.InstanceService_Insta
for name, c := range clients {
switch c := c.(type) {
case *client.ProcessManagerClient:
c.Close()
if closeErr := c.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close ProcessManager client")
}
case *spdkclient.SPDKClient:
c.Close()
if closeErr := c.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close SPDK client")
}
}
delete(clients, name)
}
@ -686,6 +776,7 @@ func processResponseToInstanceResponse(p *rpc.ProcessResponse, processType strin
TargetPortEnd: targetPortEnd,
ErrorMsg: p.Status.ErrorMsg,
Conditions: p.Status.Conditions,
Uuid: p.Status.Uuid,
},
Deleted: p.Deleted,
}
@ -706,6 +797,7 @@ func replicaResponseToInstanceResponse(r *spdkapi.Replica) *rpc.InstanceResponse
PortStart: r.PortStart,
PortEnd: r.PortEnd,
Conditions: make(map[string]bool),
Uuid: r.UUID,
},
}
}
@ -729,6 +821,8 @@ func engineResponseToInstanceResponse(e *spdkapi.Engine) *rpc.InstanceResponse {
StandbyTargetPortStart: e.StandbyTargetPort,
StandbyTargetPortEnd: e.StandbyTargetPort,
Conditions: make(map[string]bool),
UblkId: e.UblkID,
Uuid: e.UUID,
},
}
}
@ -756,7 +850,15 @@ func (ops V2DataEngineInstanceOps) InstanceSuspend(req *rpc.InstanceSuspendReque
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"type": req.Type,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
switch req.Type {
case types.InstanceTypeEngine:
@ -795,7 +897,15 @@ func (ops V2DataEngineInstanceOps) InstanceResume(req *rpc.InstanceResumeRequest
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"type": req.Type,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
switch req.Type {
case types.InstanceTypeEngine:
@ -835,7 +945,16 @@ func (ops V2DataEngineInstanceOps) InstanceSwitchOverTarget(req *rpc.InstanceSwi
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"type": req.Type,
"dataEngine": req.DataEngine,
"targetAddress": req.TargetAddress,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
switch req.Type {
case types.InstanceTypeEngine:
@ -874,7 +993,15 @@ func (ops V2DataEngineInstanceOps) InstanceDeleteTarget(req *rpc.InstanceDeleteT
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"name": req.Name,
"type": req.Type,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
switch req.Type {
case types.InstanceTypeEngine:

View File

@ -6,12 +6,13 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/protobuf/types/known/emptypb"
grpccodes "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
spdkclient "github.com/longhorn/longhorn-spdk-engine/pkg/client"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
grpccodes "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
const (
@ -60,7 +61,11 @@ func (ops V2DataEngineInstanceOps) LogSetLevel(ctx context.Context, req *rpc.Log
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
err = c.LogSetLevel(spdkLevel)
if err != nil {
@ -88,7 +93,11 @@ func (ops V2DataEngineInstanceOps) LogSetFlags(ctx context.Context, req *rpc.Log
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
err = c.LogSetFlags(req.Flags)
if err != nil {
@ -135,7 +144,11 @@ func (ops V2DataEngineInstanceOps) LogGetFlags(ctx context.Context, req *rpc.Log
if err != nil {
return nil, grpcstatus.Error(grpccodes.Internal, errors.Wrapf(err, "failed to create SPDK client").Error())
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
flags, err := c.LogGetFlags()
if err != nil {

View File

@ -129,6 +129,7 @@ func (p *Process) RPCResponse() *rpc.ProcessResponse {
PortStart: p.PortStart,
PortEnd: p.PortEnd,
Conditions: p.Conditions,
Uuid: p.UUID,
},
}
}

View File

@ -10,7 +10,6 @@ import (
"syscall"
"time"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
@ -22,6 +21,7 @@ import (
lhBitmap "github.com/longhorn/go-common-libs/bitmap"
lhKubernetes "github.com/longhorn/go-common-libs/kubernetes"
lhLonghorn "github.com/longhorn/go-common-libs/longhorn"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/longhorn/longhorn-instance-manager/pkg/types"
"github.com/longhorn/longhorn-instance-manager/pkg/util"
@ -230,14 +230,18 @@ func (pm *Manager) ProcessCreate(ctx context.Context, req *rpc.ProcessCreateRequ
}
// ProcessDelete will delete the process named by the request.
// If UUID is specified, the process will be deleted only if the UUID matches.
// If the process doesn't exist, the deletion returns a NotFound error.
func (pm *Manager) ProcessDelete(ctx context.Context, req *rpc.ProcessDeleteRequest) (ret *rpc.ProcessResponse, err error) {
logrus.Infof("Process Manager: prepare to delete process %v", req.Name)
logrus.Infof("Process Manager: prepare to delete process %v, UUID %v", req.Name, req.Uuid)
p := pm.findProcess(req.Name)
if p == nil {
return nil, status.Errorf(codes.NotFound, "cannot find process %v", req.Name)
}
if req.Uuid != "" && p.UUID != req.Uuid {
return nil, status.Errorf(codes.NotFound, "cannot find process %v with UUID %v", req.Name, req.Uuid)
}
p.Stop()

View File

@ -92,7 +92,9 @@ func (p *Proxy) SPDKBackingImageWatch(req *emptypb.Empty, srv rpc.ProxyEngineSer
<-done
logrus.Info("Stopped clients for watching SPDK backing image")
if spdkClient != nil {
spdkClient.Close()
if closeErr := spdkClient.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close SPDK client")
}
}
close(done)
}()

View File

@ -9,9 +9,10 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/protobuf/types/known/emptypb"
grpccodes "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
backupstore "github.com/longhorn/backupstore"
butil "github.com/longhorn/backupstore/util"
@ -95,7 +96,16 @@ func (ops V2DataEngineProxyOps) SnapshotBackup(ctx context.Context, req *rpc.Eng
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
snapshotName := req.SnapshotName
if snapshotName == "" {
@ -144,12 +154,23 @@ func (p *Proxy) SnapshotBackupStatus(ctx context.Context, req *rpc.EngineSnapsho
}
func (ops V1DataEngineProxyOps) SnapshotBackupStatus(ctx context.Context, req *rpc.EngineSnapshotBackupStatusRequest) (resp *rpc.EngineSnapshotBackupStatusProxyResponse, err error) {
log := logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
})
c, err := eclient.NewControllerClient(req.ProxyEngineRequest.Address, req.ProxyEngineRequest.VolumeName,
req.ProxyEngineRequest.EngineName)
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close Controller client")
}
}()
replicas, err := ops.ReplicaList(ctx, req.ProxyEngineRequest)
if err != nil {
@ -171,12 +192,14 @@ func (ops V1DataEngineProxyOps) SnapshotBackupStatus(ctx context.Context, req *r
cReplica, err := rclient.NewReplicaClient(r.Address.Address, req.ProxyEngineRequest.VolumeName,
r.Address.InstanceName)
if err != nil {
logrus.WithError(err).Debugf("Failed to create replica client with %v", r.Address.Address)
log.WithError(err).Debugf("Failed to create Replica client with %v", r.Address.Address)
continue
}
_, err = esync.FetchBackupStatus(cReplica, req.BackupName, r.Address.Address)
cReplica.Close()
if closeErr := cReplica.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close Replica client")
}
if err == nil {
replicaAddress = r.Address.Address
break
@ -201,9 +224,13 @@ func (ops V1DataEngineProxyOps) SnapshotBackupStatus(ctx context.Context, req *r
// We may know replicaName here. If we don't, we pass an empty string, which disables validation.
cReplica, err := rclient.NewReplicaClient(replicaAddress, req.ProxyEngineRequest.VolumeName, req.ReplicaName)
if err != nil {
return nil, errors.Wrapf(err, "failed to create replica client with %v", replicaAddress)
return nil, errors.Wrapf(err, "failed to create Replica client with %v", replicaAddress)
}
defer cReplica.Close()
defer func() {
if closeErr := cReplica.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close Replica client")
}
}()
status, err := esync.FetchBackupStatus(cReplica, req.BackupName, replicaAddress)
if err != nil {
@ -225,7 +252,16 @@ func (ops V2DataEngineProxyOps) SnapshotBackupStatus(ctx context.Context, req *r
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
status, err := c.EngineBackupStatus(req.BackupName, req.ProxyEngineRequest.EngineName, req.ReplicaAddress)
if err != nil {
@ -299,7 +335,16 @@ func (ops V2DataEngineProxyOps) BackupRestore(ctx context.Context, req *rpc.Engi
if err != nil {
return grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
return c.EngineBackupRestore(&spdkclient.BackupRestoreRequest{
BackupUrl: req.Url,
@ -360,7 +405,16 @@ func (ops V2DataEngineProxyOps) BackupRestoreStatus(ctx context.Context, req *rp
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
recv, err := c.EngineRestoreStatus(req.EngineName)
if err != nil {

View File

@ -4,20 +4,46 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
eclient "github.com/longhorn/longhorn-engine/pkg/controller/client"
grpccodes "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"github.com/longhorn/types/pkg/generated/enginerpc"
eclient "github.com/longhorn/longhorn-engine/pkg/controller/client"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
)
func (p *Proxy) MetricsGet(ctx context.Context, req *rpc.ProxyEngineRequest) (resp *rpc.EngineMetricsGetProxyResponse, err error) {
log := logrus.WithFields(logrus.Fields{"serviceURL": req.Address})
log := logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"volume": req.VolumeName,
"instance": req.EngineName,
"dataEngine": req.DataEngine,
})
log.Trace("Getting metrics")
ops, ok := p.ops[req.DataEngine]
if !ok {
return nil, grpcstatus.Errorf(grpccodes.Unimplemented, "unsupported data engine %v", req.DataEngine)
}
return ops.MetricsGet(ctx, req)
}
func (ops V1DataEngineProxyOps) MetricsGet(ctx context.Context, req *rpc.ProxyEngineRequest) (resp *rpc.EngineMetricsGetProxyResponse, err error) {
c, err := eclient.NewControllerClient(req.Address, req.VolumeName, req.EngineName)
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"volume": req.VolumeName,
"instance": req.EngineName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
metrics, err := c.MetricsGet()
if err != nil {
@ -35,3 +61,36 @@ func (p *Proxy) MetricsGet(ctx context.Context, req *rpc.ProxyEngineRequest) (re
},
}, nil
}
func (ops V2DataEngineProxyOps) MetricsGet(ctx context.Context, req *rpc.ProxyEngineRequest) (resp *rpc.EngineMetricsGetProxyResponse, err error) {
c, err := getSPDKClientFromAddress(req.Address)
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.Address, err)
}
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"volume": req.VolumeName,
"instance": req.EngineName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
metrics, err := c.MetricsGet(req.EngineName)
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get engine %v: %v", req.EngineName, err)
}
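// Copy the SPDK engine metrics into the shared enginerpc.Metrics message so that
// the v1 and v2 data engines return the same proxy response shape.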
return &rpc.EngineMetricsGetProxyResponse{
Metrics: &enginerpc.Metrics{
ReadThroughput: metrics.ReadThroughput,
WriteThroughput: metrics.WriteThroughput,
ReadLatency: metrics.ReadLatency,
WriteLatency: metrics.WriteLatency,
ReadIOPS: metrics.ReadIOPS,
WriteIOPS: metrics.WriteIOPS,
},
}, nil
}
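// Illustrative sketch only (not part of this change): a caller-side helper that
// logs the aggregate IOPS from a proxy metrics response. The helper name is
// hypothetical; the fields match the enginerpc.Metrics message populated above.
//
// func logEngineIOPS(resp *rpc.EngineMetricsGetProxyResponse) {
// 	logrus.Infof("engine IOPS: read=%v write=%v", resp.Metrics.ReadIOPS, resp.Metrics.WriteIOPS)
// }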

View File

@ -31,6 +31,7 @@ type ProxyOps interface {
ReplicaAdd(context.Context, *rpc.EngineReplicaAddRequest) (*emptypb.Empty, error)
ReplicaList(context.Context, *rpc.ProxyEngineRequest) (*rpc.EngineReplicaListProxyResponse, error)
ReplicaRebuildingStatus(context.Context, *rpc.ProxyEngineRequest) (*rpc.EngineReplicaRebuildStatusProxyResponse, error)
ReplicaRebuildingQosSet(context.Context, *rpc.EngineReplicaRebuildingQosSetRequest) (*emptypb.Empty, error)
ReplicaRemove(context.Context, *rpc.EngineReplicaRemoveRequest) (*emptypb.Empty, error)
ReplicaVerifyRebuild(context.Context, *rpc.EngineReplicaVerifyRebuildRequest) (*emptypb.Empty, error)
ReplicaModeUpdate(context.Context, *rpc.EngineReplicaModeUpdateRequest) (*emptypb.Empty, error)
@ -52,6 +53,8 @@ type ProxyOps interface {
SnapshotBackupStatus(context.Context, *rpc.EngineSnapshotBackupStatusRequest) (*rpc.EngineSnapshotBackupStatusProxyResponse, error)
BackupRestore(context.Context, *rpc.EngineBackupRestoreRequest, map[string]string) error
BackupRestoreStatus(context.Context, *rpc.ProxyEngineRequest) (*rpc.EngineBackupRestoreStatusProxyResponse, error)
MetricsGet(context.Context, *rpc.ProxyEngineRequest) (*rpc.EngineMetricsGetProxyResponse, error)
}
type V1DataEngineProxyOps struct{}
@ -108,7 +111,11 @@ func (p *Proxy) ServerVersionGet(ctx context.Context, req *rpc.ProxyEngineReques
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close Controller client")
}
}()
recv, err := c.VersionDetailGet()
if err != nil {

View File

@ -1,21 +1,23 @@
package proxy
import (
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/protobuf/types/known/emptypb"
grpccodes "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/longhorn/types/pkg/generated/enginerpc"
eclient "github.com/longhorn/longhorn-engine/pkg/controller/client"
esync "github.com/longhorn/longhorn-engine/pkg/sync"
etypes "github.com/longhorn/longhorn-engine/pkg/types"
spdktypes "github.com/longhorn/longhorn-spdk-engine/pkg/types"
"github.com/longhorn/types/pkg/generated/enginerpc"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/longhorn/longhorn-instance-manager/pkg/types"
@ -75,7 +77,22 @@ func (ops V2DataEngineProxyOps) ReplicaAdd(ctx context.Context, req *rpc.EngineR
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"replicaName": req.ReplicaName,
"replicaAddress": req.ReplicaAddress,
"restore": req.Restore,
"size": req.Size,
"currentSize": req.CurrentSize,
"fastSync": req.FastSync,
"localSync": req.LocalSync,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
replicaAddress := strings.TrimPrefix(req.ReplicaAddress, "tcp://")
@ -107,7 +124,16 @@ func (ops V1DataEngineProxyOps) ReplicaList(ctx context.Context, req *rpc.ProxyE
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
recv, err := c.ReplicaList()
if err != nil {
@ -149,7 +175,16 @@ func (ops V2DataEngineProxyOps) ReplicaList(ctx context.Context, req *rpc.ProxyE
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
recv, err := c.EngineGet(req.EngineName)
if err != nil {
@ -164,7 +199,7 @@ func (ops V2DataEngineProxyOps) ReplicaList(ctx context.Context, req *rpc.ProxyE
}
replica := &enginerpc.ControllerReplica{
Address: &enginerpc.ReplicaAddress{
Address: address,
Address: types.AddTcpPrefixForAddress(address),
},
Mode: replicaModeToGRPCReplicaMode(mode),
}
@ -222,17 +257,29 @@ func (ops V1DataEngineProxyOps) ReplicaRebuildingStatus(ctx context.Context, req
}
func (ops V2DataEngineProxyOps) ReplicaRebuildingStatus(ctx context.Context, req *rpc.ProxyEngineRequest) (resp *rpc.EngineReplicaRebuildStatusProxyResponse, err error) {
log := logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
})
engineCli, err := getSPDKClientFromAddress(req.Address)
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.Address, err)
}
defer engineCli.Close()
defer func() {
if closeErr := engineCli.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
e, err := engineCli.EngineGet(req.EngineName)
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get engine %v: %v", req.EngineName, err)
}
// TODO: By design, there is at most one rebuilding replica per volume, so there is no need to return a map for the rebuilding status.
resp = &rpc.EngineReplicaRebuildStatusProxyResponse{
Status: make(map[string]*enginerpc.ReplicaRebuildStatusResponse),
}
@ -250,14 +297,16 @@ func (ops V2DataEngineProxyOps) ReplicaRebuildingStatus(ctx context.Context, req
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from replica address %v: %v", replicaAddress, err)
}
defer replicaCli.Close()
defer func() {
if closeErr := replicaCli.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
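// Query the rebuilding destination replica directly for its shallow-copy state.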
shallowCopyResp, err := replicaCli.ReplicaRebuildingDstShallowCopyCheck(replicaName)
if err != nil {
resp.Status[tcpReplicaAddress] = &enginerpc.ReplicaRebuildStatusResponse{
Error: fmt.Sprintf("failed to get replica rebuild status of %v: %v", replicaAddress, err),
}
continue
// Let the upper layer handle this error rather than treating it as the error message of a rebuilding failure
return nil, err
}
resp.Status[tcpReplicaAddress] = &enginerpc.ReplicaRebuildStatusResponse{
Error: shallowCopyResp.Error,
@ -271,6 +320,93 @@ func (ops V2DataEngineProxyOps) ReplicaRebuildingStatus(ctx context.Context, req
return resp, nil
}
func (p *Proxy) ReplicaRebuildingQosSet(ctx context.Context, req *rpc.EngineReplicaRebuildingQosSetRequest) (resp *emptypb.Empty, err error) {
log := logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
"qosLimitMbps": req.QosLimitMbps,
})
log.Trace("Setting qos on replica rebuilding")
ops, ok := p.ops[req.ProxyEngineRequest.DataEngine]
if !ok {
return nil, grpcstatus.Errorf(grpccodes.Unimplemented, "unsupported data engine %v", req.ProxyEngineRequest.DataEngine)
}
return ops.ReplicaRebuildingQosSet(ctx, req)
}
func (ops V1DataEngineProxyOps) ReplicaRebuildingQosSet(ctx context.Context, req *rpc.EngineReplicaRebuildingQosSetRequest) (resp *emptypb.Empty, err error) {
return nil, grpcstatus.Errorf(grpccodes.Unimplemented, "unsupported data engine %v", req.ProxyEngineRequest.DataEngine)
}
func (ops V2DataEngineProxyOps) ReplicaRebuildingQosSet(ctx context.Context, req *rpc.EngineReplicaRebuildingQosSetRequest) (resp *emptypb.Empty, err error) {
log := logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
"qosLimitMbps": req.QosLimitMbps,
})
engineCli, err := getSPDKClientFromAddress(req.ProxyEngineRequest.Address)
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer func() {
if closeErr := engineCli.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close SPDK engine client")
}
}()
engine, err := engineCli.EngineGet(req.ProxyEngineRequest.EngineName)
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get engine %v: %v", req.ProxyEngineRequest.EngineName, err)
}
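// Apply the QoS limit only to write-only (WO) replicas, i.e. the replicas
// currently being rebuilt; healthy replicas are skipped.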
for replicaName, mode := range engine.ReplicaModeMap {
if mode != spdktypes.ModeWO {
continue
}
replicaAddress := engine.ReplicaAddressMap[replicaName]
if replicaAddress == "" {
log.WithField("replicaName", replicaName).Warn("Empty replica address, skipping QoS set")
continue
}
replicaCli, err := getSPDKClientFromAddress(replicaAddress)
if err != nil {
log.WithError(err).WithField("replicaAddress", replicaAddress).
Warn("Failed to get SPDK client from replica address")
continue
}
defer func() {
if closeErr := replicaCli.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close SPDK replica client")
}
}()
if err := replicaCli.ReplicaRebuildingDstSetQosLimit(replicaName, req.QosLimitMbps); err != nil {
log.WithError(err).WithFields(logrus.Fields{
"replicaName": replicaName,
"replicaAddr": replicaAddress,
"qosLimitMbps": req.QosLimitMbps,
}).Warn("Failed to set QoS on replica")
continue
}
log.WithFields(logrus.Fields{
"replicaName": replicaName,
"replicaAddr": replicaAddress,
"qosLimitMbps": req.QosLimitMbps,
}).Trace("Successfully set QoS on replica")
}
return &emptypb.Empty{}, nil
}
func (p *Proxy) ReplicaVerifyRebuild(ctx context.Context, req *rpc.EngineReplicaVerifyRebuildRequest) (resp *emptypb.Empty, err error) {
log := logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
@ -327,7 +463,17 @@ func (ops V1DataEngineProxyOps) ReplicaRemove(ctx context.Context, req *rpc.Engi
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"replicaName": req.ReplicaName,
"replicaAddress": req.ReplicaAddress,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
return nil, c.ReplicaDelete(req.ReplicaAddress)
}
@ -337,9 +483,21 @@ func (ops V2DataEngineProxyOps) ReplicaRemove(ctx context.Context, req *rpc.Engi
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"replicaName": req.ReplicaName,
"replicaAddress": req.ReplicaAddress,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
return nil, c.EngineReplicaDelete(req.ProxyEngineRequest.EngineName, req.ReplicaName, req.ReplicaAddress)
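// The SPDK client expects a bare host:port address, so strip the tcp:// scheme before deleting the replica.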
replicaAddress := strings.TrimPrefix(req.ReplicaAddress, "tcp://")
return nil, c.EngineReplicaDelete(req.ProxyEngineRequest.EngineName, req.ReplicaName, replicaAddress)
}
func (p *Proxy) ReplicaModeUpdate(ctx context.Context, req *rpc.EngineReplicaModeUpdateRequest) (resp *emptypb.Empty, err error) {
@ -365,7 +523,11 @@ func (ops V1DataEngineProxyOps) ReplicaModeUpdate(ctx context.Context, req *rpc.
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close Controller client")
}
}()
if _, err = c.ReplicaUpdate(req.ReplicaAddress, etypes.GRPCReplicaModeToReplicaMode(req.Mode)); err != nil {
return nil, err

View File

@ -6,15 +6,18 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/protobuf/types/known/emptypb"
grpccodes "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/longhorn/types/pkg/generated/enginerpc"
eclient "github.com/longhorn/longhorn-engine/pkg/controller/client"
esync "github.com/longhorn/longhorn-engine/pkg/sync"
"github.com/longhorn/longhorn-instance-manager/pkg/util"
"github.com/longhorn/types/pkg/generated/enginerpc"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/longhorn/longhorn-instance-manager/pkg/util"
)
func (p *Proxy) VolumeSnapshot(ctx context.Context, req *rpc.EngineVolumeSnapshotRequest) (resp *rpc.EngineVolumeSnapshotProxyResponse, err error) {
@ -39,7 +42,16 @@ func (ops V1DataEngineProxyOps) VolumeSnapshot(ctx context.Context, req *rpc.Eng
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
recv, err := c.VolumeSnapshot(req.SnapshotVolume.Name, req.SnapshotVolume.Labels, req.SnapshotVolume.FreezeFilesystem)
if err != nil {
@ -58,7 +70,16 @@ func (ops V2DataEngineProxyOps) VolumeSnapshot(ctx context.Context, req *rpc.Eng
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
snapshotName := req.SnapshotVolume.Name
if snapshotName == "" {
@ -97,7 +118,16 @@ func (ops V1DataEngineProxyOps) SnapshotList(ctx context.Context, req *rpc.Proxy
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
recv, err := c.ReplicaList()
if err != nil {
@ -133,7 +163,16 @@ func (ops V2DataEngineProxyOps) SnapshotList(ctx context.Context, req *rpc.Proxy
if err != nil {
return nil, errors.Wrapf(err, "failed to get SPDK client from engine address %v", req.Address)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
engine, err := c.EngineGet(req.EngineName)
if err != nil {
@ -187,18 +226,33 @@ func (p *Proxy) SnapshotClone(ctx context.Context, req *rpc.EngineSnapshotCloneR
}
func (ops V1DataEngineProxyOps) SnapshotClone(ctx context.Context, req *rpc.EngineSnapshotCloneRequest) (resp *emptypb.Empty, err error) {
log := logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
})
cFrom, err := eclient.NewControllerClient(req.FromEngineAddress, req.FromVolumeName, req.FromEngineName)
if err != nil {
return nil, err
}
defer cFrom.Close()
defer func() {
if closeErr := cFrom.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close Controller client")
}
}()
cTo, err := eclient.NewControllerClient(req.ProxyEngineRequest.Address, req.ProxyEngineRequest.VolumeName,
req.ProxyEngineRequest.EngineName)
if err != nil {
return nil, err
}
defer cTo.Close()
defer func() {
if closeErr := cTo.Close(); closeErr != nil {
log.WithError(closeErr).Warn("Failed to close Controller client")
}
}()
err = esync.CloneSnapshot(cTo, cFrom, req.ProxyEngineRequest.VolumeName, req.FromVolumeName, req.SnapshotName,
req.ExportBackingImageIfExist, int(req.FileSyncHttpClientTimeout), req.GrpcTimeoutSeconds)
@ -234,7 +288,16 @@ func (ops V1DataEngineProxyOps) SnapshotCloneStatus(ctx context.Context, req *rp
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
recv, err := esync.CloneStatus(c, req.VolumeName)
if err != nil {
@ -287,7 +350,16 @@ func (ops V1DataEngineProxyOps) SnapshotRevert(ctx context.Context, req *rpc.Eng
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
if err := c.VolumeRevert(req.Name); err != nil {
return nil, err
@ -301,7 +373,16 @@ func (ops V2DataEngineProxyOps) SnapshotRevert(ctx context.Context, req *rpc.Eng
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
err = c.EngineSnapshotRevert(req.ProxyEngineRequest.EngineName, req.Name)
if err != nil {
@ -346,7 +427,16 @@ func (ops V2DataEngineProxyOps) SnapshotPurge(ctx context.Context, req *rpc.Engi
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
// For v2 Data Engine, snapshot purge is no longer a time-consuming operation
err = c.EngineSnapshotPurge(req.ProxyEngineRequest.EngineName)
@ -443,7 +533,16 @@ func (ops V2DataEngineProxyOps) SnapshotRemove(ctx context.Context, req *rpc.Eng
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
var lastErr error
for _, name := range req.Names {
@ -488,8 +587,23 @@ func (ops V1DataEngineProxyOps) SnapshotHash(ctx context.Context, req *rpc.Engin
}
func (ops V2DataEngineProxyOps) SnapshotHash(ctx context.Context, req *rpc.EngineSnapshotHashRequest) (resp *emptypb.Empty, err error) {
/* TODO: implement this */
return nil, grpcstatus.Errorf(grpccodes.Unimplemented, "not implemented")
c, err := getSPDKClientFromAddress(req.ProxyEngineRequest.Address)
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
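// Trigger the snapshot hash computation on the engine; the resulting state and
// checksum can be retrieved via SnapshotHashStatus below.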
err = c.EngineSnapshotHash(req.ProxyEngineRequest.EngineName, req.SnapshotName, req.Rehash)
return &emptypb.Empty{}, err
}
func (p *Proxy) SnapshotHashStatus(ctx context.Context, req *rpc.EngineSnapshotHashStatusRequest) (resp *rpc.EngineSnapshotHashStatusProxyResponse, err error) {
@ -536,6 +650,37 @@ func (ops V1DataEngineProxyOps) SnapshotHashStatus(ctx context.Context, req *rpc
}
func (ops V2DataEngineProxyOps) SnapshotHashStatus(ctx context.Context, req *rpc.EngineSnapshotHashStatusRequest) (resp *rpc.EngineSnapshotHashStatusProxyResponse, err error) {
/* TODO: implement this */
return nil, grpcstatus.Errorf(grpccodes.Unimplemented, "not implemented")
c, err := getSPDKClientFromAddress(req.ProxyEngineRequest.Address)
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.ProxyEngineRequest.Address, err)
}
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
recv, err := c.EngineSnapshotHashStatus(req.ProxyEngineRequest.EngineName, req.SnapshotName)
if err != nil {
return nil, err
}
resp = &rpc.EngineSnapshotHashStatusProxyResponse{
Status: map[string]*enginerpc.SnapshotHashStatusResponse{},
}
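// Translate each per-replica hash status reported by the engine into the proxy response format.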
for k, v := range recv.Status {
resp.Status[k] = &enginerpc.SnapshotHashStatusResponse{
State: v.State,
Checksum: v.Checksum,
Error: v.Error,
SilentlyCorrupted: v.SilentlyCorrupted,
}
}
return resp, nil
}

View File

@ -4,16 +4,19 @@ import (
"crypto/sha256"
"encoding/hex"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/protobuf/types/known/emptypb"
grpccodes "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"github.com/longhorn/types/pkg/generated/enginerpc"
lhns "github.com/longhorn/go-common-libs/ns"
lhtypes "github.com/longhorn/go-common-libs/types"
eclient "github.com/longhorn/longhorn-engine/pkg/controller/client"
"github.com/longhorn/types/pkg/generated/enginerpc"
rpc "github.com/longhorn/types/pkg/generated/imrpc"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
grpccodes "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/longhorn/longhorn-instance-manager/pkg/util"
)
@ -39,7 +42,16 @@ func (ops V1DataEngineProxyOps) VolumeGet(ctx context.Context, req *rpc.ProxyEng
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
recv, err := c.VolumeGet()
if err != nil {
@ -69,7 +81,16 @@ func (ops V2DataEngineProxyOps) VolumeGet(ctx context.Context, req *rpc.ProxyEng
if err != nil {
return nil, grpcstatus.Errorf(grpccodes.Internal, "failed to get SPDK client from engine address %v: %v", req.Address, err)
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close SPDK client")
}
}()
recv, err := c.EngineGet(req.EngineName)
if err != nil {
@ -114,7 +135,16 @@ func (ops V1DataEngineProxyOps) VolumeExpand(ctx context.Context, req *rpc.Engin
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
err = c.VolumeExpand(req.Expand.Size)
if err != nil {
@ -151,7 +181,16 @@ func (ops V1DataEngineProxyOps) VolumeFrontendStart(ctx context.Context, req *rp
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
err = c.VolumeFrontendStart(req.FrontendStart.Frontend)
if err != nil {
@ -187,7 +226,16 @@ func (ops V1DataEngineProxyOps) VolumeFrontendShutdown(ctx context.Context, req
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.Address,
"engineName": req.EngineName,
"volumeName": req.VolumeName,
"dataEngine": req.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
err = c.VolumeFrontendShutdown()
if err != nil {
@ -224,7 +272,16 @@ func (ops V1DataEngineProxyOps) VolumeUnmapMarkSnapChainRemovedSet(ctx context.C
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
err = c.VolumeUnmapMarkSnapChainRemovedSet(req.UnmapMarkSnap.Enabled)
if err != nil {
@ -261,7 +318,16 @@ func (ops V1DataEngineProxyOps) VolumeSnapshotMaxCountSet(ctx context.Context, r
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
err = c.VolumeSnapshotMaxCountSet(int(req.Count.Count))
if err != nil {
@ -298,7 +364,16 @@ func (ops V1DataEngineProxyOps) VolumeSnapshotMaxSizeSet(ctx context.Context, re
if err != nil {
return nil, err
}
defer c.Close()
defer func() {
if closeErr := c.Close(); closeErr != nil {
logrus.WithFields(logrus.Fields{
"serviceURL": req.ProxyEngineRequest.Address,
"engineName": req.ProxyEngineRequest.EngineName,
"volumeName": req.ProxyEngineRequest.VolumeName,
"dataEngine": req.ProxyEngineRequest.DataEngine,
}).WithError(closeErr).Warn("Failed to close Controller client")
}
}()
err = c.VolumeSnapshotMaxSizeSet(req.Size.Size)
if err != nil {

View File

@ -124,7 +124,9 @@ func (l LonghornWriter) StreamLog(done chan struct{}) (chan string, error) {
}
}
close(logChan)
file.Close()
if closeErr := file.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close file")
}
}()
return logChan, nil
}

View File

@ -130,7 +130,9 @@ func IsSPDKTgtReady(timeout time.Duration) bool {
for i := 0; i < int(timeout.Seconds()); i++ {
conn, err := net.DialTimeout(spdkhelpertypes.DefaultJSONServerNetwork, spdkhelpertypes.DefaultUnixDomainSocketPath, 1*time.Second)
if err == nil {
conn.Close()
if closeErr := conn.Close(); closeErr != nil {
logrus.WithError(closeErr).Warn("Failed to close connection")
}
return true
}
time.Sleep(time.Second)

View File

@ -5,42 +5,93 @@ source $(dirname $0)/version
cd $(dirname $0)/..
PROJECT=`basename "$PWD"`
PROJECT=$(basename "${PWD}")
case $(uname -m) in
aarch64 | arm64)
ARCH=arm64
;;
x86_64)
ARCH=amd64
;;
s390x)
ARCH=s390x
;;
*)
echo "$(uname -a): unsupported architecture"
exit 1
esac
command -v buildx >/dev/null && BUILD_CMD=(buildx) || BUILD_CMD=(docker buildx)
if [ ! -x ./bin/longhorn ]; then
./scripts/build
# read configurable parameters
REPO=${REPO:-longhornio}
IMAGE_NAME=${IMAGE_NAME:-${PROJECT}}
TAG=${TAG:-''}
PUSH=${PUSH:-'false'}
IS_SECURE=${IS_SECURE:-'false'}
MACHINE=${MACHINE:-''}
TARGET_PLATFORMS=${TARGET_PLATFORMS:-''}
IID_FILE=${IID_FILE:-''}
IID_FILE_FLAG=${IID_FILE_FLAG:-''}
SRC_BRANCH=${SRC_BRANCH:-''}
SRC_TAG=${SRC_TAG:-''}
# TODO: implement self-contained build
[[ ! -x ./bin/longhorn-instance-manager ]] && ./scripts/build
if [[ -z $TAG ]]; then
if API_VERSION=$(./bin/longhorn-instance-manager version --client-only | jq ".clientVersion.instanceManagerAPIVersion"); then
TAG="v${API_VERSION}_$(date -u +%Y%m%d)"
else
TAG="${VERSION}"
fi
fi
cp -r bin package/
IMAGE="${REPO}/${IMAGE_NAME}:${TAG}"
APIVERSION=`./bin/longhorn-instance-manager version --client-only|jq ".clientVersion.instanceManagerAPIVersion"`
TAG=${TAG:-v${APIVERSION}_`date -u +%Y%m%d`}
REPO=${REPO:-longhornio}
IMAGE=${REPO}/${PROJECT}:${TAG}
BUILDER_ARGS=()
[[ ${MACHINE} ]] && BUILDER_ARGS+=('--builder' "${MACHINE}")
IFS=' ' read -r -a IID_FILE_ARGS <<<"$IID_FILE_FLAG"
[[ -n "${IID_FILE}" && ${#IID_FILE_ARGS} == 0 ]] && IID_FILE_ARGS=('--iidfile' "${IID_FILE}")
BUILDX_ARGS=()
if [[ "${PUSH}" == 'true' ]]; then
BUILDX_ARGS+=('--push')
else
BUILDX_ARGS+=('--load')
fi
[[ ${IS_SECURE} == 'true' ]] && BUILDX_ARGS+=('--sbom=true' '--attest' 'type=provenance,mode=max')
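# build for the requested platform(s) when TARGET_PLATFORMS is set, otherwise fall back to the host architecture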
if [[ ${TARGET_PLATFORMS} ]] ; then
IFS='/' read -r OS ARCH <<<"${TARGET_PLATFORMS}"
BUILDX_ARGS+=('--platform' "${TARGET_PLATFORMS}")
else
case $(uname -m) in
aarch64 | arm64)
ARCH=arm64
;;
x86_64)
ARCH=amd64
;;
*)
echo "$(uname -a): unsupported architecture"
exit 1
esac
BUILDX_ARGS+=('--platform' "linux/${ARCH}")
fi
IMAGE_ARGS=(--build-arg ARCH="${ARCH}")
[[ -n "${SRC_BRANCH}" ]] && IMAGE_ARGS+=(--build-arg SRC_BRANCH="${SRC_BRANCH}")
[[ -n "${SRC_TAG}" ]] && IMAGE_ARGS+=(--build-arg SRC_TAG="${SRC_TAG}")
# update base images to get the latest changes
grep FROM package/Dockerfile | awk '{print $2}' | while read -r BASE_IMAGE
grep 'FROM.*/' package/Dockerfile | awk '{print $2}' | while read -r BASE_IMAGE
do
docker pull "$BASE_IMAGE"
docker pull "${BASE_IMAGE}"
done
docker build --build-arg ARCH=${ARCH} -t ${IMAGE} -f package/Dockerfile .
echo "Building ${IMAGE} with ARCH=${ARCH} SRC_BRANCH=${SRC_BRANCH} SRC_TAG=${SRC_TAG}"
IMAGE_BUILD_CMD_ARGS=(
build --no-cache \
"${BUILDER_ARGS[@]}" \
"${IID_FILE_ARGS[@]}" \
"${BUILDX_ARGS[@]}" \
"${IMAGE_ARGS[@]}" \
-t "${IMAGE}" -f package/Dockerfile .
)
echo "${BUILD_CMD[@]}" "${IMAGE_BUILD_CMD_ARGS[@]}"
"${BUILD_CMD[@]}" "${IMAGE_BUILD_CMD_ARGS[@]}"
echo Built ${IMAGE}
echo "Built ${IMAGE}"
echo ${IMAGE} > ./bin/latest_image
mkdir -p ./bin
echo "${IMAGE}" > ./bin/latest_image

View File

@ -349,7 +349,7 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s
if cfg.hasSSOTokenProviderConfiguration() {
skippedFiles = 0
for _, f := range files {
section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)))
section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))
if ok {
var ssoSession ssoSession
ssoSession.setFromIniSection(section)

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.55.5"
const SDKVersion = "1.55.7"

View File

@ -1,3 +1,4 @@
// Package md2man aims in converting markdown into roff (man pages).
package md2man
import (

View File

@ -47,13 +47,13 @@ const (
tableStart = "\n.TS\nallbox;\n"
tableEnd = ".TE\n"
tableCellStart = "T{\n"
tableCellEnd = "\nT}\n"
tableCellEnd = "\nT}"
tablePreprocessor = `'\" t`
)
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
// from markdown
func NewRoffRenderer() *roffRenderer { // nolint: golint
func NewRoffRenderer() *roffRenderer {
return &roffRenderer{}
}
@ -104,7 +104,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
node.Parent.Prev.Type == blackfriday.Heading &&
node.Parent.Prev.FirstChild != nil &&
bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) {
before, after, found := bytes.Cut(node.Literal, []byte(" - "))
before, after, found := bytesCut(node.Literal, []byte(" - "))
escapeSpecialChars(w, before)
if found {
out(w, ` \- `)
@ -316,9 +316,8 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
} else if nodeLiteralSize(node) > 30 {
end = tableCellEnd
}
if node.Next == nil && end != tableCellEnd {
// Last cell: need to carriage return if we are at the end of the
// header row and content isn't wrapped in a "tablecell"
if node.Next == nil {
// Last cell: need to carriage return if we are at the end of the header row.
end += crTag
}
out(w, end)
@ -356,7 +355,7 @@ func countColumns(node *blackfriday.Node) int {
}
func out(w io.Writer, output string) {
io.WriteString(w, output) // nolint: errcheck
io.WriteString(w, output) //nolint:errcheck
}
func escapeSpecialChars(w io.Writer, text []byte) {
@ -395,7 +394,7 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
i++
}
if i > org {
w.Write(text[org:i]) // nolint: errcheck
w.Write(text[org:i]) //nolint:errcheck
}
// escape a character
@ -403,6 +402,15 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
break
}
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
w.Write([]byte{'\\', text[i]}) //nolint:errcheck
}
}
// bytesCut is a copy of [bytes.Cut] to provide compatibility with go1.17
// and older. We can remove this once we drop support for go1.17 and older.
func bytesCut(s, sep []byte) (before, after []byte, found bool) {
if i := bytes.Index(s, sep); i >= 0 {
return s[:i], s[i+len(sep):], true
}
return s, nil, false
}

View File

@ -1,180 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"fmt"
"strings"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
anypb "github.com/golang/protobuf/ptypes/any"
)
const urlPrefix = "type.googleapis.com/"
// AnyMessageName returns the message name contained in an anypb.Any message.
// Most type assertions should use the Is function instead.
//
// Deprecated: Call the any.MessageName method instead.
func AnyMessageName(any *anypb.Any) (string, error) {
name, err := anyMessageName(any)
return string(name), err
}
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
name := protoreflect.FullName(any.TypeUrl)
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
name = name[i+len("/"):]
}
if !name.IsValid() {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return name, nil
}
// MarshalAny marshals the given message m into an anypb.Any message.
//
// Deprecated: Call the anypb.New function instead.
func MarshalAny(m proto.Message) (*anypb.Any, error) {
switch dm := m.(type) {
case DynamicAny:
m = dm.Message
case *DynamicAny:
if dm == nil {
return nil, proto.ErrNil
}
m = dm.Message
}
b, err := proto.Marshal(m)
if err != nil {
return nil, err
}
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}
// Empty returns a new message of the type specified in an anypb.Any message.
// It returns protoregistry.NotFound if the corresponding message type could not
// be resolved in the global registry.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
// to resolve the message name and create a new instance of it.
func Empty(any *anypb.Any) (proto.Message, error) {
name, err := anyMessageName(any)
if err != nil {
return nil, err
}
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
if err != nil {
return nil, err
}
return proto.MessageV1(mt.New().Interface()), nil
}
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
// into the provided message m. It returns an error if the target message
// does not match the type in the Any message or if an unmarshal error occurs.
//
// The target message m may be a *DynamicAny message. If the underlying message
// type could not be resolved, then this returns protoregistry.NotFound.
//
// Deprecated: Call the any.UnmarshalTo method instead.
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
if dm, ok := m.(*DynamicAny); ok {
if dm.Message == nil {
var err error
dm.Message, err = Empty(any)
if err != nil {
return err
}
}
m = dm.Message
}
anyName, err := AnyMessageName(any)
if err != nil {
return err
}
msgName := proto.MessageName(m)
if anyName != msgName {
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
}
return proto.Unmarshal(any.Value, m)
}
// Is reports whether the Any message contains a message of the specified type.
//
// Deprecated: Call the any.MessageIs method instead.
func Is(any *anypb.Any, m proto.Message) bool {
if any == nil || m == nil {
return false
}
name := proto.MessageName(m)
if !strings.HasSuffix(any.TypeUrl, name) {
return false
}
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in an anypb.Any message.
// The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
//
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
// the any message contents into a new instance of the underlying message.
type DynamicAny struct{ proto.Message }
func (m DynamicAny) String() string {
if m.Message == nil {
return "<nil>"
}
return m.Message.String()
}
func (m DynamicAny) Reset() {
if m.Message == nil {
return
}
m.Message.Reset()
}
func (m DynamicAny) ProtoMessage() {
return
}
func (m DynamicAny) ProtoReflect() protoreflect.Message {
if m.Message == nil {
return nil
}
return dynamicAny{proto.MessageReflect(m.Message)}
}
type dynamicAny struct{ protoreflect.Message }
func (m dynamicAny) Type() protoreflect.MessageType {
return dynamicAnyType{m.Message.Type()}
}
func (m dynamicAny) New() protoreflect.Message {
return dynamicAnyType{m.Message.Type()}.New()
}
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
return DynamicAny{proto.MessageV1(m.Message.Interface())}
}
type dynamicAnyType struct{ protoreflect.MessageType }
func (t dynamicAnyType) New() protoreflect.Message {
return dynamicAny{t.MessageType.New()}
}
func (t dynamicAnyType) Zero() protoreflect.Message {
return dynamicAny{t.MessageType.Zero()}
}

View File

@ -1,62 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
package any
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/any.proto.
type Any = anypb.Any
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}

View File

@ -1,10 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ptypes provides functionality for interacting with well-known types.
//
// Deprecated: Well-known types have specialized functionality directly
// injected into the generated packages for each message type.
// See the deprecation notice for each function for the suggested alternative.
package ptypes

View File

@ -1,76 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"errors"
"fmt"
"time"
durationpb "github.com/golang/protobuf/ptypes/duration"
)
// Range of google.protobuf.Duration as specified in duration.proto.
// This is about 10,000 years in seconds.
const (
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// Duration converts a durationpb.Duration to a time.Duration.
// Duration returns an error if dur is invalid or overflows a time.Duration.
//
// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
func Duration(dur *durationpb.Duration) (time.Duration, error) {
if err := validateDuration(dur); err != nil {
return 0, err
}
d := time.Duration(dur.Seconds) * time.Second
if int64(d/time.Second) != dur.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
if dur.Nanos != 0 {
d += time.Duration(dur.Nanos) * time.Nanosecond
if (d < 0) != (dur.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durationpb.Duration.
//
// Deprecated: Call the durationpb.New function instead.
func DurationProto(d time.Duration) *durationpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durationpb.Duration{
Seconds: int64(secs),
Nanos: int32(nanos),
}
}
// validateDuration determines whether the durationpb.Duration is valid
// according to the definition in google/protobuf/duration.proto.
// A valid durpb.Duration may still be too large to fit into a time.Duration
// Note that the range of durationpb.Duration is about 10,000 years,
// while the range of time.Duration is about 290 years.
func validateDuration(dur *durationpb.Duration) error {
if dur == nil {
return errors.New("duration: nil Duration")
}
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", dur)
}
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", dur)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
}
return nil
}

View File

@ -1,63 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
package duration
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/duration.proto.
type Duration = durationpb.Duration
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}

View File

@ -1,112 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"errors"
"fmt"
"time"
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)
// Range of google.protobuf.Duration as specified in timestamp.proto.
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return
// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
//
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
//
// Deprecated: Call the timestamppb.Now function instead.
func TimestampNow() *timestamppb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
//
// Deprecated: Call the timestamppb.New function instead.
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
ts := &timestamppb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps.
// For invalid Timestamps, it returns an error message in parentheses.
//
// Deprecated: Call the ts.AsTime method instead,
// followed by a call to the Format method on the time.Time value.
func TimestampString(ts *timestamppb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
// and has a Nanos field in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes the problem.
//
// Every valid Timestamp can be represented by a time.Time,
// but the converse is not true.
func validateTimestamp(ts *timestamppb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
}
return nil
}

View File

@ -1,64 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
package timestamp
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/timestamp.proto.
type Timestamp = timestamppb.Timestamp
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}

View File

@ -20,8 +20,8 @@ import (
"os/exec"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
yaml "gopkg.in/yaml.v3"
extensions "github.com/google/gnostic-models/extensions"
@ -33,7 +33,7 @@ type ExtensionHandler struct {
}
// CallExtension calls a binary extension handler.
func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) {
func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *anypb.Any, err error) {
if context == nil || context.ExtensionHandlers == nil {
return false, nil, nil
}
@ -50,7 +50,7 @@ func CallExtension(context *Context, in *yaml.Node, extensionName string) (handl
return handled, response, err
}
func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) {
func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*anypb.Any, error) {
if extensionHandlers.Name != "" {
yamlData, _ := yaml.Marshal(in)
request := &extensions.ExtensionHandlerRequest{

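The hunk above swaps the deprecated `github.com/golang/protobuf/ptypes/any` type for `anypb.Any` in the `CallExtension` and `handle` signatures. A minimal sketch of the replacement pattern with the `anypb` API (illustrative only; `wrapperspb` is used here as a stand-in payload):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// anypb.New packs any proto.Message into a *anypb.Any,
	// the type now returned by CallExtension above.
	packed, err := anypb.New(wrapperspb.String("extension payload"))
	if err != nil {
		panic(err)
	}

	// UnmarshalNew reconstructs the concrete message on the receiving side.
	msg, err := packed.UnmarshalNew()
	if err != nil {
		panic(err)
	}
	fmt.Println(msg.(*wrapperspb.StringValue).GetValue())
}
```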

@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc v3.19.3
// protoc-gen-go v1.35.1
// protoc v4.23.4
// source: extensions/extension.proto
package gnostic_extension_v1
@ -51,11 +51,9 @@ type Version struct {
func (x *Version) Reset() {
*x = Version{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_extensions_extension_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Version) String() string {
@ -66,7 +64,7 @@ func (*Version) ProtoMessage() {}
func (x *Version) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -123,11 +121,9 @@ type ExtensionHandlerRequest struct {
func (x *ExtensionHandlerRequest) Reset() {
*x = ExtensionHandlerRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_extensions_extension_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExtensionHandlerRequest) String() string {
@ -138,7 +134,7 @@ func (*ExtensionHandlerRequest) ProtoMessage() {}
func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -191,11 +187,9 @@ type ExtensionHandlerResponse struct {
func (x *ExtensionHandlerResponse) Reset() {
*x = ExtensionHandlerResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_extensions_extension_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExtensionHandlerResponse) String() string {
@ -206,7 +200,7 @@ func (*ExtensionHandlerResponse) ProtoMessage() {}
func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -257,11 +251,9 @@ type Wrapper struct {
func (x *Wrapper) Reset() {
*x = Wrapper{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_extensions_extension_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Wrapper) String() string {
@ -272,7 +264,7 @@ func (*Wrapper) ProtoMessage() {}
func (x *Wrapper) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -367,7 +359,7 @@ func file_extensions_extension_proto_rawDescGZIP() []byte {
}
var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_extensions_extension_proto_goTypes = []interface{}{
var file_extensions_extension_proto_goTypes = []any{
(*Version)(nil), // 0: gnostic.extension.v1.Version
(*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest
(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
@ -390,56 +382,6 @@ func file_extensions_extension_proto_init() {
if File_extensions_extension_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Version); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExtensionHandlerRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExtensionHandlerResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Wrapper); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{


@ -19,8 +19,8 @@ import (
"log"
"os"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
)
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
@ -54,7 +54,7 @@ func Main(handler extensionHandler) {
response.Errors = append(response.Errors, err.Error())
} else if handled {
response.Handled = true
response.Value, err = ptypes.MarshalAny(output)
response.Value, err = anypb.New(output)
if err != nil {
response.Errors = append(response.Errors, err.Error())
}
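The same migration shows up here as `ptypes.MarshalAny` becoming `anypb.New` inside `Main`. A hedged sketch of the surrounding response handling, using only the fields visible in this diff (`Handled`, `Value`, `Errors`); the helper name and package are made up for illustration:

```go
package gnosticdemo

import (
	extensions "github.com/google/gnostic-models/extensions"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
)

// buildResponse mirrors the hunk above: a handled extension output is packed
// into the response Value with anypb.New instead of the removed ptypes.MarshalAny.
func buildResponse(handled bool, output proto.Message, handlerErr error) *extensions.ExtensionHandlerResponse {
	response := &extensions.ExtensionHandlerResponse{}
	switch {
	case handlerErr != nil:
		response.Errors = append(response.Errors, handlerErr.Error())
	case handled:
		response.Handled = true
		value, err := anypb.New(output)
		if err != nil {
			response.Errors = append(response.Errors, err.Error())
			return response
		}
		response.Value = value
	}
	return response
}
```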

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,182 @@
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.35.1
// protoc v4.23.4
// source: openapiv3/annotations.proto
package openapi_v3
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
{
ExtendedType: (*descriptorpb.FileOptions)(nil),
ExtensionType: (*Document)(nil),
Field: 1143,
Name: "openapi.v3.document",
Tag: "bytes,1143,opt,name=document",
Filename: "openapiv3/annotations.proto",
},
{
ExtendedType: (*descriptorpb.MethodOptions)(nil),
ExtensionType: (*Operation)(nil),
Field: 1143,
Name: "openapi.v3.operation",
Tag: "bytes,1143,opt,name=operation",
Filename: "openapiv3/annotations.proto",
},
{
ExtendedType: (*descriptorpb.MessageOptions)(nil),
ExtensionType: (*Schema)(nil),
Field: 1143,
Name: "openapi.v3.schema",
Tag: "bytes,1143,opt,name=schema",
Filename: "openapiv3/annotations.proto",
},
{
ExtendedType: (*descriptorpb.FieldOptions)(nil),
ExtensionType: (*Schema)(nil),
Field: 1143,
Name: "openapi.v3.property",
Tag: "bytes,1143,opt,name=property",
Filename: "openapiv3/annotations.proto",
},
}
// Extension fields to descriptorpb.FileOptions.
var (
// optional openapi.v3.Document document = 1143;
E_Document = &file_openapiv3_annotations_proto_extTypes[0]
)
// Extension fields to descriptorpb.MethodOptions.
var (
// optional openapi.v3.Operation operation = 1143;
E_Operation = &file_openapiv3_annotations_proto_extTypes[1]
)
// Extension fields to descriptorpb.MessageOptions.
var (
// optional openapi.v3.Schema schema = 1143;
E_Schema = &file_openapiv3_annotations_proto_extTypes[2]
)
// Extension fields to descriptorpb.FieldOptions.
var (
// optional openapi.v3.Schema property = 1143;
E_Property = &file_openapiv3_annotations_proto_extTypes[3]
)
var File_openapiv3_annotations_proto protoreflect.FileDescriptor
var file_openapiv3_annotations_proto_rawDesc = []byte{
0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f,
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x6f, 0x70, 0x65,
0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64,
0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70,
0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a,
0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68,
0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70,
0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d,
0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x42, 0x0a, 0x0e, 0x6f,
0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41,
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f,
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_openapiv3_annotations_proto_goTypes = []any{
(*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
(*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
(*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
(*Document)(nil), // 4: openapi.v3.Document
(*Operation)(nil), // 5: openapi.v3.Operation
(*Schema)(nil), // 6: openapi.v3.Schema
}
var file_openapiv3_annotations_proto_depIdxs = []int32{
0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions
1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions
2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions
3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions
4, // 4: openapi.v3.document:type_name -> openapi.v3.Document
5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation
6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema
6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema
8, // [8:8] is the sub-list for method output_type
8, // [8:8] is the sub-list for method input_type
4, // [4:8] is the sub-list for extension type_name
0, // [0:4] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_openapiv3_annotations_proto_init() }
func file_openapiv3_annotations_proto_init() {
if File_openapiv3_annotations_proto != nil {
return
}
file_openapiv3_OpenAPIv3_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_openapiv3_annotations_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 4,
NumServices: 0,
},
GoTypes: file_openapiv3_annotations_proto_goTypes,
DependencyIndexes: file_openapiv3_annotations_proto_depIdxs,
ExtensionInfos: file_openapiv3_annotations_proto_extTypes,
}.Build()
File_openapiv3_annotations_proto = out.File
file_openapiv3_annotations_proto_rawDesc = nil
file_openapiv3_annotations_proto_goTypes = nil
file_openapiv3_annotations_proto_depIdxs = nil
}


@ -0,0 +1,56 @@
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package openapi.v3;
import "google/protobuf/descriptor.proto";
import "openapiv3/OpenAPIv3.proto";
// The Go package name.
option go_package = "./openapiv3;openapi_v3";
// This option lets the proto compiler generate Java code inside the package
// name (see below) instead of inside an outer class. It creates a simpler
// developer experience by reducing one-level of name nesting and be
// consistent with most programming languages that don't support outer classes.
option java_multiple_files = true;
// The Java outer classname should be the filename in UpperCamelCase. This
// class is only used to hold proto descriptor, so developers don't need to
// work with it directly.
option java_outer_classname = "AnnotationsProto";
// The Java package name must be proto package name with proper prefix.
option java_package = "org.openapi_v3";
// A reasonable prefix for the Objective-C symbols generated from the package.
// It should at a minimum be 3 characters long, all uppercase, and convention
// is to use an abbreviation of the package name. Something short, but
// hopefully unique enough to not conflict with things that may come along in
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
option objc_class_prefix = "OAS";
extend google.protobuf.FileOptions {
Document document = 1143;
}
extend google.protobuf.MethodOptions {
Operation operation = 1143;
}
extend google.protobuf.MessageOptions {
Schema schema = 1143;
}
extend google.protobuf.FieldOptions {
Schema property = 1143;
}
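The proto above attaches OpenAPI metadata to descriptors through custom options at field number 1143. A minimal sketch of reading one of those options back with `proto.GetExtension` and the `E_Operation` descriptor generated earlier in this compare (the helper name and package are illustrative):

```go
package annotationsdemo

import (
	openapi_v3 "github.com/google/gnostic-models/openapiv3"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// operationOf returns the openapi.v3.operation option attached to an RPC
// method descriptor, or nil when the annotation is absent.
func operationOf(md protoreflect.MethodDescriptor) *openapi_v3.Operation {
	opts, ok := md.Options().(*descriptorpb.MethodOptions)
	if !ok || opts == nil {
		return nil
	}
	op, _ := proto.GetExtension(opts, openapi_v3.E_Operation).(*openapi_v3.Operation)
	return op
}
```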


@ -19,6 +19,7 @@ const (
tbFunc // func(T) bool
ttbFunc // func(T, T) bool
ttiFunc // func(T, T) int
trbFunc // func(T, R) bool
tibFunc // func(T, I) bool
trFunc // func(T) R
@ -28,11 +29,13 @@ const (
Transformer = trFunc // func(T) R
ValueFilter = ttbFunc // func(T, T) bool
Less = ttbFunc // func(T, T) bool
Compare = ttiFunc // func(T, T) int
ValuePredicate = tbFunc // func(T) bool
KeyValuePredicate = trbFunc // func(T, R) bool
)
var boolType = reflect.TypeOf(true)
var intType = reflect.TypeOf(0)
// IsType reports whether the reflect.Type is of the specified function type.
func IsType(t reflect.Type, ft funcType) bool {
@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool {
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
return true
}
case ttiFunc: // func(T, T) int
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType {
return true
}
case trbFunc: // func(T, R) bool
if ni == 2 && no == 1 && t.Out(0) == boolType {
return true

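The hunk above teaches the function-type checker about three-valued comparers of shape `func(T, T) int` (the new `ttiFunc`/`Compare` kind). A small illustration of a function with that shape and the same reflection checks the added case performs:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

func main() {
	// strings.Compare has the func(T, T) int shape accepted by the new case.
	t := reflect.TypeOf(strings.Compare)

	// Mirror the added checks: two identical inputs, a single int result.
	intType := reflect.TypeOf(0)
	ok := t.Kind() == reflect.Func &&
		t.NumIn() == 2 && t.NumOut() == 1 &&
		t.In(0) == t.In(1) &&
		t.Out(0) == intType
	fmt.Println(ok) // true
}
```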

@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
// Named type with unexported fields.
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
if _, ok := reflect.New(t).Interface().(error); ok {
isProtoMessage := func(t reflect.Type) bool {
m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
m.Type.Out(0).Name() == "Message"
}
if isProtoMessage(t) {
help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types`
} else if _, ok := reflect.New(t).Interface().(error); ok {
help = "consider using cmpopts.EquateErrors to compare error values"
} else if t.Comparable() {
help = "consider using cmpopts.EquateComparable to compare comparable Go types"

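The change above makes the validator suggest `protocmp.Transform` when a diff reaches a generated protobuf message with unexported fields. A minimal usage sketch of that suggestion (`wrapperspb` is only a convenient example message):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/testing/protocmp"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	x := wrapperspb.String("old")
	y := wrapperspb.String("new")

	// Without the option, cmp panics on the message's unexported fields;
	// protocmp.Transform() compares messages by their protobuf contents.
	fmt.Println(cmp.Diff(x, y, protocmp.Transform()))
}
```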

@ -1,10 +0,0 @@
language: go
go:
- 1.11.x
- 1.12.x
- 1.13.x
- master
script:
- go test -cover


@ -1,67 +0,0 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**, it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and setup a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Go makes it very simple to ensure properly formatted code, so always run
`go fmt` on your code before committing it. You should also run
[golint][] over your code. As noted in the [golint readme][], it's not
strictly necessary that your code be completely "lint-free", but this will
help you find common style issues.
1. Any significant changes should almost always be accompanied by tests. The
project already has good test coverage, so look at some of the existing
tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
are invaluable tools for seeing which parts of your code aren't being
exercised by your tests.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages are able to be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[golint]: https://github.com/golang/lint
[golint readme]: https://github.com/golang/lint/blob/master/README
[gocov]: https://github.com/axw/gocov
[gocov-html]: https://github.com/matm/gocov-html
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
[pull request]: https://help.github.com/articles/creating-a-pull-request


@ -1,18 +0,0 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package fuzz is a library for populating go objects with random values.
package fuzz


@ -1,605 +0,0 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzz
import (
"fmt"
"math/rand"
"reflect"
"regexp"
"time"
"github.com/google/gofuzz/bytesource"
"strings"
)
// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
type fuzzFuncMap map[reflect.Type]reflect.Value
// Fuzzer knows how to fill any object with random fields.
type Fuzzer struct {
fuzzFuncs fuzzFuncMap
defaultFuzzFuncs fuzzFuncMap
r *rand.Rand
nilChance float64
minElements int
maxElements int
maxDepth int
skipFieldPatterns []*regexp.Regexp
}
// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
// RandSource, NilChance, or NumElements in any order.
func New() *Fuzzer {
return NewWithSeed(time.Now().UnixNano())
}
func NewWithSeed(seed int64) *Fuzzer {
f := &Fuzzer{
defaultFuzzFuncs: fuzzFuncMap{
reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
},
fuzzFuncs: fuzzFuncMap{},
r: rand.New(rand.NewSource(seed)),
nilChance: .2,
minElements: 1,
maxElements: 10,
maxDepth: 100,
}
return f
}
// NewFromGoFuzz is a helper function that enables using gofuzz (this
// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
// fuzzing. Essentially, it enables translating the fuzzing bytes from
// go-fuzz to any Go object using this library.
//
// This implementation promises a constant translation from a given slice of
// bytes to the fuzzed objects. This promise will remain over future
// versions of Go and of this library.
//
// Note: the returned Fuzzer should not be shared between multiple goroutines,
// as its deterministic output will no longer be available.
//
// Example: use go-fuzz to test the function `MyFunc(int)` in the package
// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content:
//
// // +build gofuzz
// package mypacakge
// import fuzz "github.com/google/gofuzz"
// func Fuzz(data []byte) int {
// var i int
// fuzz.NewFromGoFuzz(data).Fuzz(&i)
// MyFunc(i)
// return 0
// }
func NewFromGoFuzz(data []byte) *Fuzzer {
return New().RandSource(bytesource.New(data))
}
// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
//
// Each entry in fuzzFuncs must be a function taking two parameters.
// The first parameter must be a pointer or map. It is the variable that
// function will fill with random data. The second parameter must be a
// fuzz.Continue, which will provide a source of randomness and a way
// to automatically continue fuzzing smaller pieces of the first parameter.
//
// These functions are called sensibly, e.g., if you wanted custom string
// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
// called and passed the address of strings. Maps and pointers will always
// be made/new'd for you, ignoring the NilChange option. For slices, it
// doesn't make much sense to pre-create them--Fuzzer doesn't know how
// long you want your slice--so take a pointer to a slice, and make it
// yourself. (If you don't want your map/pointer type pre-made, take a
// pointer to it, and make it yourself.) See the examples for a range of
// custom functions.
func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
for i := range fuzzFuncs {
v := reflect.ValueOf(fuzzFuncs[i])
if v.Kind() != reflect.Func {
panic("Need only funcs!")
}
t := v.Type()
if t.NumIn() != 2 || t.NumOut() != 0 {
panic("Need 2 in and 0 out params!")
}
argT := t.In(0)
switch argT.Kind() {
case reflect.Ptr, reflect.Map:
default:
panic("fuzzFunc must take pointer or map type")
}
if t.In(1) != reflect.TypeOf(Continue{}) {
panic("fuzzFunc's second parameter must be type fuzz.Continue")
}
f.fuzzFuncs[argT] = v
}
return f
}
// RandSource causes f to get values from the given source of randomness.
// Use if you want deterministic fuzzing.
func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
f.r = rand.New(s)
return f
}
// NilChance sets the probability of creating a nil pointer, map, or slice to
// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
func (f *Fuzzer) NilChance(p float64) *Fuzzer {
if p < 0 || p > 1 {
panic("p should be between 0 and 1, inclusive.")
}
f.nilChance = p
return f
}
// NumElements sets the minimum and maximum number of elements that will be
// added to a non-nil map or slice.
func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
if atLeast > atMost {
panic("atLeast must be <= atMost")
}
if atLeast < 0 {
panic("atLeast must be >= 0")
}
f.minElements = atLeast
f.maxElements = atMost
return f
}
func (f *Fuzzer) genElementCount() int {
if f.minElements == f.maxElements {
return f.minElements
}
return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
}
func (f *Fuzzer) genShouldFill() bool {
return f.r.Float64() >= f.nilChance
}
// MaxDepth sets the maximum number of recursive fuzz calls that will be made
// before stopping. This includes struct members, pointers, and map and slice
// elements.
func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
f.maxDepth = d
return f
}
// Skip fields which match the supplied pattern. Call this multiple times if needed
// This is useful to skip XXX_ fields generated by protobuf
func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer {
f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
return f
}
// Fuzz recursively fills all of obj's fields with something random. First
// this tries to find a custom fuzz function (see Funcs). If there is no
// custom function this tests whether the object implements fuzz.Interface and,
// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
// there is a default fuzz function provided by this package. If all of that
// fails, this will generate random values for all primitive fields and then
// recurse for all non-primitives.
//
// This is safe for cyclic or tree-like structs, up to a limit. Use the
// MaxDepth method to adjust how deep you need it to recurse.
//
// obj must be a pointer. Only exported (public) fields can be set (thanks,
// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
// fields.
func (f *Fuzzer) Fuzz(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
f.fuzzWithContext(v, 0)
}
// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
// obj's type will not be called and obj will not be tested for fuzz.Interface
// conformance. This applies only to obj and not other instances of obj's
// type.
// Not safe for cyclic or tree-like structs!
// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
// Intended for tests, so will panic on bad input or unimplemented fields.
func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
f.fuzzWithContext(v, flagNoCustomFuzz)
}
const (
// Do not try to find a custom fuzz function. Does not apply recursively.
flagNoCustomFuzz uint64 = 1 << iota
)
func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
fc := &fuzzerContext{fuzzer: f}
fc.doFuzz(v, flags)
}
// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
// be thread-safe.
type fuzzerContext struct {
fuzzer *Fuzzer
curDepth int
}
func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
if fc.curDepth >= fc.fuzzer.maxDepth {
return
}
fc.curDepth++
defer func() { fc.curDepth-- }()
if !v.CanSet() {
return
}
if flags&flagNoCustomFuzz == 0 {
// Check for both pointer and non-pointer custom functions.
if v.CanAddr() && fc.tryCustom(v.Addr()) {
return
}
if fc.tryCustom(v) {
return
}
}
if fn, ok := fillFuncMap[v.Kind()]; ok {
fn(v, fc.fuzzer.r)
return
}
switch v.Kind() {
case reflect.Map:
if fc.fuzzer.genShouldFill() {
v.Set(reflect.MakeMap(v.Type()))
n := fc.fuzzer.genElementCount()
for i := 0; i < n; i++ {
key := reflect.New(v.Type().Key()).Elem()
fc.doFuzz(key, 0)
val := reflect.New(v.Type().Elem()).Elem()
fc.doFuzz(val, 0)
v.SetMapIndex(key, val)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Ptr:
if fc.fuzzer.genShouldFill() {
v.Set(reflect.New(v.Type().Elem()))
fc.doFuzz(v.Elem(), 0)
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Slice:
if fc.fuzzer.genShouldFill() {
n := fc.fuzzer.genElementCount()
v.Set(reflect.MakeSlice(v.Type(), n, n))
for i := 0; i < n; i++ {
fc.doFuzz(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Array:
if fc.fuzzer.genShouldFill() {
n := v.Len()
for i := 0; i < n; i++ {
fc.doFuzz(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
skipField := false
fieldName := v.Type().Field(i).Name
for _, pattern := range fc.fuzzer.skipFieldPatterns {
if pattern.MatchString(fieldName) {
skipField = true
break
}
}
if !skipField {
fc.doFuzz(v.Field(i), 0)
}
}
case reflect.Chan:
fallthrough
case reflect.Func:
fallthrough
case reflect.Interface:
fallthrough
default:
panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
}
}
// tryCustom searches for custom handlers, and returns true iff it finds a match
// and successfully randomizes v.
func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
// First: see if we have a fuzz function for it.
doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
if !ok {
// Second: see if it can fuzz itself.
if v.CanInterface() {
intf := v.Interface()
if fuzzable, ok := intf.(Interface); ok {
fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
return true
}
}
// Finally: see if there is a default fuzz function.
doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
if !ok {
return false
}
}
switch v.Kind() {
case reflect.Ptr:
if v.IsNil() {
if !v.CanSet() {
return false
}
v.Set(reflect.New(v.Type().Elem()))
}
case reflect.Map:
if v.IsNil() {
if !v.CanSet() {
return false
}
v.Set(reflect.MakeMap(v.Type()))
}
default:
return false
}
doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
fc: fc,
Rand: fc.fuzzer.r,
})})
return true
}
// Interface represents an object that knows how to fuzz itself. Any time we
// find a type that implements this interface we will delegate the act of
// fuzzing itself.
type Interface interface {
Fuzz(c Continue)
}
// Continue can be passed to custom fuzzing functions to allow them to use
// the correct source of randomness and to continue fuzzing their members.
type Continue struct {
fc *fuzzerContext
// For convenience, Continue implements rand.Rand via embedding.
// Use this for generating any randomness if you want your fuzzing
// to be repeatable for a given seed.
*rand.Rand
}
// Fuzz continues fuzzing obj. obj must be a pointer.
func (c Continue) Fuzz(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
c.fc.doFuzz(v, 0)
}
// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
// obj's type will not be called and obj will not be tested for fuzz.Interface
// conformance. This applies only to obj and not other instances of obj's
// type.
func (c Continue) FuzzNoCustom(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
c.fc.doFuzz(v, flagNoCustomFuzz)
}
// RandString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings.
func (c Continue) RandString() string {
return randString(c.Rand)
}
// RandUint64 makes random 64 bit numbers.
// Weirdly, rand doesn't have a function that gives you 64 random bits.
func (c Continue) RandUint64() uint64 {
return randUint64(c.Rand)
}
// RandBool returns true or false randomly.
func (c Continue) RandBool() bool {
return randBool(c.Rand)
}
func fuzzInt(v reflect.Value, r *rand.Rand) {
v.SetInt(int64(randUint64(r)))
}
func fuzzUint(v reflect.Value, r *rand.Rand) {
v.SetUint(randUint64(r))
}
func fuzzTime(t *time.Time, c Continue) {
var sec, nsec int64
// Allow for about 1000 years of random time values, which keeps things
// like JSON parsing reasonably happy.
sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
c.Fuzz(&nsec)
*t = time.Unix(sec, nsec)
}
var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
reflect.Bool: func(v reflect.Value, r *rand.Rand) {
v.SetBool(randBool(r))
},
reflect.Int: fuzzInt,
reflect.Int8: fuzzInt,
reflect.Int16: fuzzInt,
reflect.Int32: fuzzInt,
reflect.Int64: fuzzInt,
reflect.Uint: fuzzUint,
reflect.Uint8: fuzzUint,
reflect.Uint16: fuzzUint,
reflect.Uint32: fuzzUint,
reflect.Uint64: fuzzUint,
reflect.Uintptr: fuzzUint,
reflect.Float32: func(v reflect.Value, r *rand.Rand) {
v.SetFloat(float64(r.Float32()))
},
reflect.Float64: func(v reflect.Value, r *rand.Rand) {
v.SetFloat(r.Float64())
},
reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
},
reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
v.SetComplex(complex(r.Float64(), r.Float64()))
},
reflect.String: func(v reflect.Value, r *rand.Rand) {
v.SetString(randString(r))
},
reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
panic("unimplemented")
},
}
// randBool returns true or false randomly.
func randBool(r *rand.Rand) bool {
return r.Int31()&(1<<30) == 0
}
type int63nPicker interface {
Int63n(int64) int64
}
// UnicodeRange describes a sequential range of unicode characters.
// Last must be numerically greater than First.
type UnicodeRange struct {
First, Last rune
}
// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
// To be useful, each range must have at least one character (First <= Last) and
// there must be at least one range.
type UnicodeRanges []UnicodeRange
// choose returns a random unicode character from the given range, using the
// given randomness source.
func (ur UnicodeRange) choose(r int63nPicker) rune {
count := int64(ur.Last - ur.First + 1)
return ur.First + rune(r.Int63n(count))
}
// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
// Each character is selected from the range ur. If there are no characters
// in the range (cr.Last < cr.First), this will panic.
func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
ur.check()
return func(s *string, c Continue) {
*s = ur.randString(c.Rand)
}
}
// check is a function that used to check whether the first of ur(UnicodeRange)
// is greater than the last one.
func (ur UnicodeRange) check() {
if ur.Last < ur.First {
panic("The last encoding must be greater than the first one.")
}
}
// randString of UnicodeRange makes a random string up to 20 characters long.
// Each character is selected form ur(UnicodeRange).
func (ur UnicodeRange) randString(r *rand.Rand) string {
n := r.Intn(20)
sb := strings.Builder{}
sb.Grow(n)
for i := 0; i < n; i++ {
sb.WriteRune(ur.choose(r))
}
return sb.String()
}
// defaultUnicodeRanges sets a default unicode range when user do not set
// CustomStringFuzzFunc() but wants fuzz string.
var defaultUnicodeRanges = UnicodeRanges{
{' ', '~'}, // ASCII characters
{'\u00a0', '\u02af'}, // Multi-byte encoded characters
{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
}
// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
// Each character is selected from one of the ranges of ur(UnicodeRanges).
// Each range has an equal probability of being chosen. If there are no ranges,
// or a selected range has no characters (.Last < .First), this will panic.
// Do not modify any of the ranges in ur after calling this function.
func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
// Check unicode ranges slice is empty.
if len(ur) == 0 {
panic("UnicodeRanges is empty.")
}
// if not empty, each range should be checked.
for i := range ur {
ur[i].check()
}
return func(s *string, c Continue) {
*s = ur.randString(c.Rand)
}
}
// randString of UnicodeRanges makes a random string up to 20 characters long.
// Each character is selected form one of the ranges of ur(UnicodeRanges),
// and each range has an equal probability of being chosen.
func (ur UnicodeRanges) randString(r *rand.Rand) string {
n := r.Intn(20)
sb := strings.Builder{}
sb.Grow(n)
for i := 0; i < n; i++ {
sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
}
return sb.String()
}
// randString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings.
func randString(r *rand.Rand) string {
return defaultUnicodeRanges.randString(r)
}
// randUint64 makes random 64 bit numbers.
// Weirdly, rand doesn't have a function that gives you 64 random bits.
func randUint64(r *rand.Rand) uint64 {
return uint64(r.Uint32())<<32 | uint64(r.Uint32())
}
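The gofuzz package removed above documents a strict contract for `Funcs`: each custom function takes a pointer (or map) plus a `fuzz.Continue`. Purely for reference, a minimal sketch of that contract against the public `github.com/google/gofuzz` API (the `Config` struct and its fields are made up):

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

type Config struct {
	Name    string
	Retries int
	Tags    []string
}

func main() {
	f := fuzz.New().
		NilChance(0).      // never leave pointers, maps, or slices nil
		NumElements(1, 3). // 1 to 3 elements per slice or map
		Funcs(func(s *string, c fuzz.Continue) {
			// First parameter is a pointer, second is fuzz.Continue,
			// exactly as the Funcs doc comment requires.
			*s = "name-" + c.RandString()
		})

	var cfg Config
	f.Fuzz(&cfg)
	fmt.Printf("%+v\n", cfg)
}
```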

vendor/github.com/jinzhu/copier/.gitignore generated vendored Normal file (2 lines changed)

@ -0,0 +1,2 @@
.idea/
ttt/

vendor/github.com/jinzhu/copier/License generated vendored Normal file (20 lines changed)

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2015 Jinzhu
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/jinzhu/copier/README.md generated vendored Normal file (132 lines changed)

@ -0,0 +1,132 @@
# Copier
I am a copier, I copy everything from one to another
[![test status](https://github.com/jinzhu/copier/workflows/tests/badge.svg?branch=master "test status")](https://github.com/jinzhu/copier/actions)
## Features
* Copy from field to field with same name
* Copy from method to field with same name
* Copy from field to method with same name
* Copy from slice to slice
* Copy from struct to slice
* Copy from map to map
* Enforce copying a field with a tag
* Ignore a field with a tag
* Deep Copy
## Usage
```go
package main
import (
"fmt"
"github.com/jinzhu/copier"
)
type User struct {
Name string
Role string
Age int32
EmployeeCode int64 `copier:"EmployeeNum"` // specify field name
// Explicitly ignored in the destination struct.
Salary int
}
func (user *User) DoubleAge() int32 {
return 2 * user.Age
}
// Tags in the destination Struct provide instructions to copier.Copy to ignore
// or enforce copying and to panic or return an error if a field was not copied.
type Employee struct {
// Tell copier.Copy to panic if this field is not copied.
Name string `copier:"must"`
// Tell copier.Copy to return an error if this field is not copied.
Age int32 `copier:"must,nopanic"`
// Tell copier.Copy to explicitly ignore copying this field.
Salary int `copier:"-"`
DoubleAge int32
EmployeeId int64 `copier:"EmployeeNum"` // specify field name
SuperRole string
}
func (employee *Employee) Role(role string) {
employee.SuperRole = "Super " + role
}
func main() {
var (
user = User{Name: "Jinzhu", Age: 18, Role: "Admin", Salary: 200000}
users = []User{{Name: "Jinzhu", Age: 18, Role: "Admin", Salary: 100000}, {Name: "jinzhu 2", Age: 30, Role: "Dev", Salary: 60000}}
employee = Employee{Salary: 150000}
employees = []Employee{}
)
copier.Copy(&employee, &user)
fmt.Printf("%#v \n", employee)
// Employee{
// Name: "Jinzhu", // Copy from field
// Age: 18, // Copy from field
// Salary:150000, // Copying explicitly ignored
// DoubleAge: 36, // Copy from method
// EmployeeId: 0, // Ignored
// SuperRole: "Super Admin", // Copy to method
// }
// Copy struct to slice
copier.Copy(&employees, &user)
fmt.Printf("%#v \n", employees)
// []Employee{
// {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeeId: 0, SuperRole: "Super Admin"}
// }
// Copy slice to slice
employees = []Employee{}
copier.Copy(&employees, &users)
fmt.Printf("%#v \n", employees)
// []Employee{
// {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeeId: 0, SuperRole: "Super Admin"},
// {Name: "jinzhu 2", Age: 30, Salary:0, DoubleAge: 60, EmployeeId: 0, SuperRole: "Super Dev"},
// }
// Copy map to map
map1 := map[int]int{3: 6, 4: 8}
map2 := map[int32]int8{}
copier.Copy(&map2, map1)
fmt.Printf("%#v \n", map2)
// map[int32]int8{3:6, 4:8}
}
```
### Copy with Option
```go
copier.CopyWithOption(&to, &from, copier.Option{IgnoreEmpty: true, DeepCopy: true})
```
## Contributing
You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
# Author
**jinzhu**
* <http://github.com/jinzhu>
* <wosmvp@gmail.com>
* <http://twitter.com/zhangjinzhu>
## License
Released under the [MIT License](https://github.com/jinzhu/copier/blob/master/License).
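The README stops at `CopyWithOption`; the copier.go source that follows also declares `TypeConverter` for custom cross-type conversions. A minimal sketch built only from the fields visible in that source (`SrcType`, `DstType`, `Fn`) and the exported `String` placeholder; the `Src`/`Dst` structs are made up for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/jinzhu/copier"
)

type Src struct{ CreatedAt string }
type Dst struct{ CreatedAt time.Time }

func main() {
	opt := copier.Option{
		Converters: []copier.TypeConverter{{
			SrcType: copier.String, // placeholder value declared in copier.go
			DstType: time.Time{},
			Fn: func(src interface{}) (interface{}, error) {
				return time.Parse(time.RFC3339, src.(string))
			},
		}},
	}

	src := Src{CreatedAt: "2015-01-01T00:00:00Z"}
	var dst Dst
	if err := copier.CopyWithOption(&dst, &src, opt); err != nil {
		panic(err)
	}
	fmt.Println(dst.CreatedAt.Year()) // 2015
}
```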

vendor/github.com/jinzhu/copier/copier.go generated vendored Normal file (828 lines changed)

@ -0,0 +1,828 @@
package copier
import (
"database/sql"
"database/sql/driver"
"fmt"
"reflect"
"strings"
"sync"
"unicode"
)
// These flags define options for tag handling
const (
// Denotes that a destination field must be copied to. If copying fails then a panic will ensue.
tagMust uint8 = 1 << iota
// Denotes that the program should not panic when the must flag is on and
// value is not copied. The program will return an error instead.
tagNoPanic
// Ignore a destination field from being copied to.
tagIgnore
// Denotes that the value has been copied
hasCopied
// Some default converter types for a nicer syntax
String string = ""
Bool bool = false
Int int = 0
Float32 float32 = 0
Float64 float64 = 0
)
// Option sets copy options
type Option struct {
// setting this value to true will ignore copying zero values of all the fields, including bools, as well as a
// struct having all its fields set to their zero values respectively (see IsZero() in reflect/value.go)
IgnoreEmpty bool
CaseSensitive bool
DeepCopy bool
Converters []TypeConverter
// Custom field name mappings to copy values with different names in `fromValue` and `toValue` types.
// Examples can be found in `copier_field_name_mapping_test.go`.
FieldNameMapping []FieldNameMapping
}
func (opt Option) converters() map[converterPair]TypeConverter {
var converters = map[converterPair]TypeConverter{}
// save converters into map for faster lookup
for i := range opt.Converters {
pair := converterPair{
SrcType: reflect.TypeOf(opt.Converters[i].SrcType),
DstType: reflect.TypeOf(opt.Converters[i].DstType),
}
converters[pair] = opt.Converters[i]
}
return converters
}
type TypeConverter struct {
SrcType interface{}
DstType interface{}
Fn func(src interface{}) (dst interface{}, err error)
}
type converterPair struct {
SrcType reflect.Type
DstType reflect.Type
}
func (opt Option) fieldNameMapping() map[converterPair]FieldNameMapping {
var mapping = map[converterPair]FieldNameMapping{}
for i := range opt.FieldNameMapping {
pair := converterPair{
SrcType: reflect.TypeOf(opt.FieldNameMapping[i].SrcType),
DstType: reflect.TypeOf(opt.FieldNameMapping[i].DstType),
}
mapping[pair] = opt.FieldNameMapping[i]
}
return mapping
}
type FieldNameMapping struct {
SrcType interface{}
DstType interface{}
Mapping map[string]string
}
// Tag Flags
type flags struct {
BitFlags map[string]uint8
SrcNames tagNameMapping
DestNames tagNameMapping
}
// Field Tag name mapping
type tagNameMapping struct {
FieldNameToTag map[string]string
TagToFieldName map[string]string
}
// Copy copy things
func Copy(toValue interface{}, fromValue interface{}) (err error) {
return copier(toValue, fromValue, Option{})
}
// CopyWithOption copy with option
func CopyWithOption(toValue interface{}, fromValue interface{}, opt Option) (err error) {
return copier(toValue, fromValue, opt)
}
func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) {
var (
isSlice bool
amount = 1
from = indirect(reflect.ValueOf(fromValue))
to = indirect(reflect.ValueOf(toValue))
converters = opt.converters()
mappings = opt.fieldNameMapping()
)
if !to.CanAddr() {
return ErrInvalidCopyDestination
}
// Return if from value is invalid
if !from.IsValid() {
return ErrInvalidCopyFrom
}
fromType, isPtrFrom := indirectType(from.Type())
toType, _ := indirectType(to.Type())
if fromType.Kind() == reflect.Interface {
fromType = reflect.TypeOf(from.Interface())
}
if toType.Kind() == reflect.Interface {
toType, _ = indirectType(reflect.TypeOf(to.Interface()))
oldTo := to
to = reflect.New(reflect.TypeOf(to.Interface())).Elem()
defer func() {
oldTo.Set(to)
}()
}
// Just set it if possible to assign for normal types
if from.Kind() != reflect.Slice && from.Kind() != reflect.Struct && from.Kind() != reflect.Map && (from.Type().AssignableTo(to.Type()) || from.Type().ConvertibleTo(to.Type())) {
if !isPtrFrom || !opt.DeepCopy {
to.Set(from.Convert(to.Type()))
} else {
fromCopy := reflect.New(from.Type())
fromCopy.Set(from.Elem())
to.Set(fromCopy.Convert(to.Type()))
}
return
}
if from.Kind() != reflect.Slice && fromType.Kind() == reflect.Map && toType.Kind() == reflect.Map {
if !fromType.Key().ConvertibleTo(toType.Key()) {
return ErrMapKeyNotMatch
}
if to.IsNil() {
to.Set(reflect.MakeMapWithSize(toType, from.Len()))
}
for _, k := range from.MapKeys() {
toKey := indirect(reflect.New(toType.Key()))
isSet, err := set(toKey, k, opt.DeepCopy, converters)
if err != nil {
return err
}
if !isSet {
return fmt.Errorf("%w map, old key: %v, new key: %v", ErrNotSupported, k.Type(), toType.Key())
}
elemType := toType.Elem()
if elemType.Kind() != reflect.Slice {
elemType, _ = indirectType(elemType)
}
toValue := indirect(reflect.New(elemType))
isSet, err = set(toValue, from.MapIndex(k), opt.DeepCopy, converters)
if err != nil {
return err
}
if !isSet {
if err = copier(toValue.Addr().Interface(), from.MapIndex(k).Interface(), opt); err != nil {
return err
}
}
for {
if elemType == toType.Elem() {
to.SetMapIndex(toKey, toValue)
break
}
elemType = reflect.PtrTo(elemType)
toValue = toValue.Addr()
}
}
return
}
if from.Kind() == reflect.Slice && to.Kind() == reflect.Slice {
if to.IsNil() {
slice := reflect.MakeSlice(reflect.SliceOf(to.Type().Elem()), from.Len(), from.Cap())
to.Set(slice)
}
if fromType.ConvertibleTo(toType) {
for i := 0; i < from.Len(); i++ {
if to.Len() < i+1 {
to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem()))
}
isSet, err := set(to.Index(i), from.Index(i), opt.DeepCopy, converters)
if err != nil {
return err
}
if !isSet {
// ignore error while copy slice element
err = copier(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt)
if err != nil {
continue
}
}
}
return
}
}
if fromType.Kind() != reflect.Struct || toType.Kind() != reflect.Struct {
// skip not supported type
return
}
if len(converters) > 0 {
if ok, e := set(to, from, opt.DeepCopy, converters); e == nil && ok {
// converter supported
return
}
}
if from.Kind() == reflect.Slice || to.Kind() == reflect.Slice {
isSlice = true
if from.Kind() == reflect.Slice {
amount = from.Len()
}
}
for i := 0; i < amount; i++ {
var dest, source reflect.Value
if isSlice {
// source
if from.Kind() == reflect.Slice {
source = indirect(from.Index(i))
} else {
source = indirect(from)
}
// dest
dest = indirect(reflect.New(toType).Elem())
} else {
source = indirect(from)
dest = indirect(to)
}
if len(converters) > 0 {
if ok, e := set(dest, source, opt.DeepCopy, converters); e == nil && ok {
if isSlice {
// FIXME: maybe should check the other types?
if to.Type().Elem().Kind() == reflect.Ptr {
to.Index(i).Set(dest.Addr())
} else {
if to.Len() < i+1 {
reflect.Append(to, dest)
} else {
to.Index(i).Set(dest)
}
}
} else {
to.Set(dest)
}
continue
}
}
destKind := dest.Kind()
initDest := false
if destKind == reflect.Interface {
initDest = true
dest = indirect(reflect.New(toType))
}
// Get tag options
flgs, err := getFlags(dest, source, toType, fromType)
if err != nil {
return err
}
// check source
if source.IsValid() {
copyUnexportedStructFields(dest, source)
// Copy from source field to dest field or method
fromTypeFields := deepFields(fromType)
for _, field := range fromTypeFields {
name := field.Name
// Get bit flags for field
fieldFlags := flgs.BitFlags[name]
// Check if we should ignore copying
if (fieldFlags & tagIgnore) != 0 {
continue
}
fieldNamesMapping := getFieldNamesMapping(mappings, fromType, toType)
srcFieldName, destFieldName := getFieldName(name, flgs, fieldNamesMapping)
if fromField := fieldByNameOrZeroValue(source, srcFieldName); fromField.IsValid() && !shouldIgnore(fromField, opt.IgnoreEmpty) {
// process for nested anonymous field
destFieldNotSet := false
if f, ok := dest.Type().FieldByName(destFieldName); ok {
// only initialize parent embedded struct pointer in the path
for idx := range f.Index[:len(f.Index)-1] {
destField := dest.FieldByIndex(f.Index[:idx+1])
if destField.Kind() != reflect.Ptr {
continue
}
if !destField.IsNil() {
continue
}
if !destField.CanSet() {
destFieldNotSet = true
break
}
// destField is a nil pointer that can be set
newValue := reflect.New(destField.Type().Elem())
destField.Set(newValue)
}
}
if destFieldNotSet {
break
}
toField := fieldByName(dest, destFieldName, opt.CaseSensitive)
if toField.IsValid() {
if toField.CanSet() {
isSet, err := set(toField, fromField, opt.DeepCopy, converters)
if err != nil {
return err
}
if !isSet {
if err := copier(toField.Addr().Interface(), fromField.Interface(), opt); err != nil {
return err
}
}
if fieldFlags != 0 {
// Note that a copy was made
flgs.BitFlags[name] = fieldFlags | hasCopied
}
}
} else {
// try to set to method
var toMethod reflect.Value
if dest.CanAddr() {
toMethod = dest.Addr().MethodByName(destFieldName)
} else {
toMethod = dest.MethodByName(destFieldName)
}
if toMethod.IsValid() && toMethod.Type().NumIn() == 1 && fromField.Type().AssignableTo(toMethod.Type().In(0)) {
toMethod.Call([]reflect.Value{fromField})
}
}
}
}
// Copy from from method to dest field
for _, field := range deepFields(toType) {
name := field.Name
srcFieldName, destFieldName := getFieldName(name, flgs, getFieldNamesMapping(mappings, fromType, toType))
var fromMethod reflect.Value
if source.CanAddr() {
fromMethod = source.Addr().MethodByName(srcFieldName)
} else {
fromMethod = source.MethodByName(srcFieldName)
}
if fromMethod.IsValid() && fromMethod.Type().NumIn() == 0 && fromMethod.Type().NumOut() == 1 && !shouldIgnore(fromMethod, opt.IgnoreEmpty) {
if toField := fieldByName(dest, destFieldName, opt.CaseSensitive); toField.IsValid() && toField.CanSet() {
values := fromMethod.Call([]reflect.Value{})
if len(values) >= 1 {
set(toField, values[0], opt.DeepCopy, converters)
}
}
}
}
}
if isSlice && to.Kind() == reflect.Slice {
if dest.Addr().Type().AssignableTo(to.Type().Elem()) {
if to.Len() < i+1 {
to.Set(reflect.Append(to, dest.Addr()))
} else {
isSet, err := set(to.Index(i), dest.Addr(), opt.DeepCopy, converters)
if err != nil {
return err
}
if !isSet {
// ignore error while copy slice element
err = copier(to.Index(i).Addr().Interface(), dest.Addr().Interface(), opt)
if err != nil {
continue
}
}
}
} else if dest.Type().AssignableTo(to.Type().Elem()) {
if to.Len() < i+1 {
to.Set(reflect.Append(to, dest))
} else {
isSet, err := set(to.Index(i), dest, opt.DeepCopy, converters)
if err != nil {
return err
}
if !isSet {
// ignore error while copy slice element
err = copier(to.Index(i).Addr().Interface(), dest.Interface(), opt)
if err != nil {
continue
}
}
}
}
} else if initDest {
to.Set(dest)
}
err = checkBitFlags(flgs.BitFlags)
}
return
}
func getFieldNamesMapping(mappings map[converterPair]FieldNameMapping, fromType reflect.Type, toType reflect.Type) map[string]string {
var fieldNamesMapping map[string]string
if len(mappings) > 0 {
pair := converterPair{
SrcType: fromType,
DstType: toType,
}
if v, ok := mappings[pair]; ok {
fieldNamesMapping = v.Mapping
}
}
return fieldNamesMapping
}
func fieldByNameOrZeroValue(source reflect.Value, fieldName string) (value reflect.Value) {
defer func() {
if err := recover(); err != nil {
value = reflect.Value{}
}
}()
return source.FieldByName(fieldName)
}
func copyUnexportedStructFields(to, from reflect.Value) {
if from.Kind() != reflect.Struct || to.Kind() != reflect.Struct || !from.Type().AssignableTo(to.Type()) {
return
}
// create a shallow copy of 'to' to get all fields
tmp := indirect(reflect.New(to.Type()))
tmp.Set(from)
// revert exported fields
for i := 0; i < to.NumField(); i++ {
if tmp.Field(i).CanSet() {
tmp.Field(i).Set(to.Field(i))
}
}
to.Set(tmp)
}
func shouldIgnore(v reflect.Value, ignoreEmpty bool) bool {
return ignoreEmpty && v.IsZero()
}
var deepFieldsLock sync.RWMutex
var deepFieldsMap = make(map[reflect.Type][]reflect.StructField)
func deepFields(reflectType reflect.Type) []reflect.StructField {
deepFieldsLock.RLock()
cache, ok := deepFieldsMap[reflectType]
deepFieldsLock.RUnlock()
if ok {
return cache
}
var res []reflect.StructField
if reflectType, _ = indirectType(reflectType); reflectType.Kind() == reflect.Struct {
fields := make([]reflect.StructField, 0, reflectType.NumField())
for i := 0; i < reflectType.NumField(); i++ {
v := reflectType.Field(i)
// PkgPath is the package path that qualifies a lower case (unexported)
// field name. It is empty for upper case (exported) field names.
// See https://golang.org/ref/spec#Uniqueness_of_identifiers
if v.PkgPath == "" {
fields = append(fields, v)
if v.Anonymous {
// also consider fields of anonymous fields as fields of the root
fields = append(fields, deepFields(v.Type)...)
}
}
}
res = fields
}
deepFieldsLock.Lock()
deepFieldsMap[reflectType] = res
deepFieldsLock.Unlock()
return res
}
func indirect(reflectValue reflect.Value) reflect.Value {
for reflectValue.Kind() == reflect.Ptr {
reflectValue = reflectValue.Elem()
}
return reflectValue
}
func indirectType(reflectType reflect.Type) (_ reflect.Type, isPtr bool) {
for reflectType.Kind() == reflect.Ptr || reflectType.Kind() == reflect.Slice {
reflectType = reflectType.Elem()
isPtr = true
}
return reflectType, isPtr
}
func set(to, from reflect.Value, deepCopy bool, converters map[converterPair]TypeConverter) (bool, error) {
if !from.IsValid() {
return true, nil
}
if ok, err := lookupAndCopyWithConverter(to, from, converters); err != nil {
return false, err
} else if ok {
return true, nil
}
if to.Kind() == reflect.Ptr {
// set `to` to nil if from is nil
if from.Kind() == reflect.Ptr && from.IsNil() {
to.Set(reflect.Zero(to.Type()))
return true, nil
} else if to.IsNil() {
// `from` -> `to`
// sql.NullString -> *string
if fromValuer, ok := driverValuer(from); ok {
v, err := fromValuer.Value()
if err != nil {
return true, nil
}
// if `from` is not valid do nothing with `to`
if v == nil {
return true, nil
}
}
// allocate new `to` variable with default value (eg. *string -> new(string))
to.Set(reflect.New(to.Type().Elem()))
}
// depointer `to`
to = to.Elem()
}
if deepCopy {
toKind := to.Kind()
if toKind == reflect.Interface && to.IsNil() {
if reflect.TypeOf(from.Interface()) != nil {
to.Set(reflect.New(reflect.TypeOf(from.Interface())).Elem())
toKind = reflect.TypeOf(to.Interface()).Kind()
}
}
if from.Kind() == reflect.Ptr && from.IsNil() {
return true, nil
}
if _, ok := to.Addr().Interface().(sql.Scanner); !ok && (toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice) {
return false, nil
}
}
if from.Type().ConvertibleTo(to.Type()) {
to.Set(from.Convert(to.Type()))
} else if toScanner, ok := to.Addr().Interface().(sql.Scanner); ok {
// `from` -> `to`
// *string -> sql.NullString
if from.Kind() == reflect.Ptr {
// if `from` is nil do nothing with `to`
if from.IsNil() {
return true, nil
}
// depointer `from`
from = indirect(from)
}
// `from` -> `to`
// string -> sql.NullString
// set `to` by invoking method Scan(`from`)
err := toScanner.Scan(from.Interface())
if err != nil {
return false, nil
}
} else if fromValuer, ok := driverValuer(from); ok {
// `from` -> `to`
// sql.NullString -> string
v, err := fromValuer.Value()
if err != nil {
return false, nil
}
// if `from` is not valid do nothing with `to`
if v == nil {
return true, nil
}
rv := reflect.ValueOf(v)
if rv.Type().AssignableTo(to.Type()) {
to.Set(rv)
} else if to.CanSet() && rv.Type().ConvertibleTo(to.Type()) {
to.Set(rv.Convert(to.Type()))
}
} else if from.Kind() == reflect.Ptr {
return set(to, from.Elem(), deepCopy, converters)
} else {
return false, nil
}
return true, nil
}
// lookupAndCopyWithConverter looks up the type pair, on success the TypeConverter Fn func is called to copy src to dst field.
func lookupAndCopyWithConverter(to, from reflect.Value, converters map[converterPair]TypeConverter) (copied bool, err error) {
pair := converterPair{
SrcType: from.Type(),
DstType: to.Type(),
}
if cnv, ok := converters[pair]; ok {
result, err := cnv.Fn(from.Interface())
if err != nil {
return false, err
}
if result != nil {
to.Set(reflect.ValueOf(result))
} else {
// in case we've got a nil value to copy
to.Set(reflect.Zero(to.Type()))
}
return true, nil
}
return false, nil
}
// parseTags Parses struct tags and returns uint8 bit flags.
func parseTags(tag string) (flg uint8, name string, err error) {
for _, t := range strings.Split(tag, ",") {
switch t {
case "-":
flg = tagIgnore
return
case "must":
flg = flg | tagMust
case "nopanic":
flg = flg | tagNoPanic
default:
if unicode.IsUpper([]rune(t)[0]) {
name = strings.TrimSpace(t)
} else {
err = ErrFieldNameTagStartNotUpperCase
}
}
}
return
}
// getFlags Parses struct tags for bit flags and field names.
func getFlags(dest, src reflect.Value, toType, fromType reflect.Type) (flags, error) {
flgs := flags{
BitFlags: map[string]uint8{},
SrcNames: tagNameMapping{
FieldNameToTag: map[string]string{},
TagToFieldName: map[string]string{},
},
DestNames: tagNameMapping{
FieldNameToTag: map[string]string{},
TagToFieldName: map[string]string{},
},
}
var toTypeFields, fromTypeFields []reflect.StructField
if dest.IsValid() {
toTypeFields = deepFields(toType)
}
if src.IsValid() {
fromTypeFields = deepFields(fromType)
}
// Get a list of dest tags
for _, field := range toTypeFields {
tags := field.Tag.Get("copier")
if tags != "" {
var name string
var err error
if flgs.BitFlags[field.Name], name, err = parseTags(tags); err != nil {
return flags{}, err
} else if name != "" {
flgs.DestNames.FieldNameToTag[field.Name] = name
flgs.DestNames.TagToFieldName[name] = field.Name
}
}
}
// Get a list of source tags
for _, field := range fromTypeFields {
tags := field.Tag.Get("copier")
if tags != "" {
var name string
var err error
if _, name, err = parseTags(tags); err != nil {
return flags{}, err
} else if name != "" {
flgs.SrcNames.FieldNameToTag[field.Name] = name
flgs.SrcNames.TagToFieldName[name] = field.Name
}
}
}
return flgs, nil
}
// checkBitFlags Checks flags for error or panic conditions.
func checkBitFlags(flagsList map[string]uint8) (err error) {
// Check flag conditions were met
for name, flgs := range flagsList {
if flgs&hasCopied == 0 {
switch {
case flgs&tagMust != 0 && flgs&tagNoPanic != 0:
err = fmt.Errorf("field %s has must tag but was not copied", name)
return
case flgs&(tagMust) != 0:
panic(fmt.Sprintf("Field %s has must tag but was not copied", name))
}
}
}
return
}
func getFieldName(fieldName string, flgs flags, fieldNameMapping map[string]string) (srcFieldName string, destFieldName string) {
// get dest field name
if name, ok := fieldNameMapping[fieldName]; ok {
srcFieldName = fieldName
destFieldName = name
return
}
if srcTagName, ok := flgs.SrcNames.FieldNameToTag[fieldName]; ok {
destFieldName = srcTagName
if destTagName, ok := flgs.DestNames.TagToFieldName[srcTagName]; ok {
destFieldName = destTagName
}
} else {
if destTagName, ok := flgs.DestNames.TagToFieldName[fieldName]; ok {
destFieldName = destTagName
}
}
if destFieldName == "" {
destFieldName = fieldName
}
// get source field name
if destTagName, ok := flgs.DestNames.FieldNameToTag[fieldName]; ok {
srcFieldName = destTagName
if srcField, ok := flgs.SrcNames.TagToFieldName[destTagName]; ok {
srcFieldName = srcField
}
} else {
if srcField, ok := flgs.SrcNames.TagToFieldName[fieldName]; ok {
srcFieldName = srcField
}
}
if srcFieldName == "" {
srcFieldName = fieldName
}
return
}
func driverValuer(v reflect.Value) (i driver.Valuer, ok bool) {
if !v.CanAddr() {
i, ok = v.Interface().(driver.Valuer)
return
}
i, ok = v.Addr().Interface().(driver.Valuer)
return
}
func fieldByName(v reflect.Value, name string, caseSensitive bool) reflect.Value {
if caseSensitive {
return v.FieldByName(name)
}
return v.FieldByNameFunc(func(n string) bool { return strings.EqualFold(n, name) })
}
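Taken together, the tag parsing (parseTags), name resolution (getFieldName) and option handling above can be exercised with a small, self-contained sketch. The structs, field names and values below are hypothetical; only the copier tags ("-", "must", "nopanic", and an upper-case rename) and copier.CopyWithOption follow the rules implemented in this file.
package main
import (
	"fmt"
	"github.com/jinzhu/copier"
)
type source struct {
	Name    string
	Payload string `copier:"Data"` // upper-case tag renames the field for matching
	Secret  string `copier:"-"`    // "-" skips this field entirely
}
type target struct {
	Name string `copier:"must,nopanic"` // must be copied; return an error instead of panicking
	Data string
}
func main() {
	src := source{Name: "vol-1", Payload: "hello", Secret: "not copied"}
	dst := target{}
	if err := copier.CopyWithOption(&dst, &src, copier.Option{IgnoreEmpty: true}); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Printf("%+v\n", dst) // {Name:vol-1 Data:hello}
}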

11
vendor/github.com/jinzhu/copier/errors.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package copier
import "errors"
var (
ErrInvalidCopyDestination = errors.New("copy destination must be non-nil and addressable")
ErrInvalidCopyFrom = errors.New("copy from must be non-nil and addressable")
ErrMapKeyNotMatch = errors.New("map's key type doesn't match")
ErrNotSupported = errors.New("not supported")
ErrFieldNameTagStartNotUpperCase = errors.New("copier field name tag must be start upper case")
)

View File

@ -56,6 +56,11 @@ type Backup struct {
SingleFile BackupFile `json:",omitempty"`
}
type LastBackupInfo struct {
Name string
SnapshotCreatedAt string
}
var (
backupstoreBase = "backupstore"
)

View File

@ -1196,12 +1196,16 @@ func checkBlockReferenceCount(blockInfos map[string]*BlockInfo, backup *Backup,
}
}
func copyLastBackupInfo(backup *Backup, lastBackup *LastBackupInfo) {
lastBackup.Name = backup.Name
lastBackup.SnapshotCreatedAt = backup.SnapshotCreatedAt
}
// getLatestBackup replaces the lastBackup object if the found
// backup.SnapshotCreatedAt time is greater than the lastBackup's
func getLatestBackup(backup *Backup, lastBackup *Backup) error {
func getLatestBackup(backup *Backup, lastBackup *LastBackupInfo) error {
if lastBackup.SnapshotCreatedAt == "" {
// FIXME - go lint points out that this copies a potentially locked sync.mutex
*lastBackup = *backup // nolint:govet
copyLastBackupInfo(backup, lastBackup)
return nil
}
@ -1216,8 +1220,7 @@ func getLatestBackup(backup *Backup, lastBackup *Backup) error {
}
if backupTime.After(lastBackupTime) {
// FIXME - go lint points out that this copies a potentially locked sync.mutex
*lastBackup = *backup // nolint:govet
copyLastBackupInfo(backup, lastBackup)
}
return nil
@ -1299,7 +1302,7 @@ func DeleteDeltaBlockBackup(backupURL string) error {
}
}
lastBackup := &Backup{}
lastBackup := &LastBackupInfo{}
for _, name := range backupNames {
log := log.WithField("backup", name)
backup, err := loadBackup(bsDriver, name, volumeName)

View File

@ -18,7 +18,7 @@ type Bitmap struct {
// NewBitmap allocate a bitmap range from [start, end], notice the end is included
func NewBitmap(start, end int32) (*Bitmap, error) {
if end < start {
return nil, fmt.Errorf("Invalid range, end (%v) cannot be less than start (%v)", end, start)
return nil, fmt.Errorf("invalid range, end (%v) cannot be less than start (%v)", end, start)
}
size := end - start + 1

View File

@ -122,7 +122,11 @@ func CopyFile(sourcePath, destinationPath string, overWrite bool) error {
if err != nil {
return err
}
defer sourceFile.Close()
defer func() {
if errClose := sourceFile.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close source file %v", sourcePath)
}
}()
_, err = CreateDirectory(filepath.Dir(destinationPath), sourceFileInfo.ModTime())
if err != nil {
@ -133,7 +137,11 @@ func CopyFile(sourcePath, destinationPath string, overWrite bool) error {
if err != nil {
return err
}
defer destinationFile.Close()
defer func() {
if errClose := destinationFile.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close destination file %v", destinationPath)
}
}()
_, err = io.Copy(destinationFile, sourceFile)
if err != nil {
@ -217,7 +225,11 @@ func SyncFile(filePath string) error {
if err != nil {
return err
}
defer file.Close()
defer func() {
if errClose := file.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close file %v", filePath)
}
}()
return file.Sync()
}
@ -315,7 +327,11 @@ func IsDirectoryEmpty(directory string) (bool, error) {
if err != nil {
return false, err
}
defer f.Close()
defer func() {
if errClose := f.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close directory %v", directory)
}
}()
_, err = f.Readdirnames(1)
if err == io.EOF {

View File

@ -1,15 +1,18 @@
package ns
import (
"os/exec"
"time"
"github.com/pkg/errors"
"github.com/longhorn/go-common-libs/types"
)
// LuksOpen runs cryptsetup luksOpen with the given passphrase and
// returns the stdout and error.
func (nsexec *Executor) LuksOpen(volume, devicePath, passphrase string, timeout time.Duration) (stdout string, err error) {
args := []string{"luksOpen", devicePath, volume, "-d", "/dev/stdin"}
args := []string{"luksOpen", devicePath, volume, "-d", "-"}
return nsexec.CryptsetupWithPassphrase(passphrase, args, timeout)
}
@ -29,7 +32,7 @@ func (nsexec *Executor) LuksFormat(devicePath, passphrase, keyCipher, keyHash, k
"--hash", keyHash,
"--key-size", keySize,
"--pbkdf", pbkdf,
devicePath, "-d", "/dev/stdin",
devicePath, "-d", "-",
}
return nsexec.CryptsetupWithPassphrase(passphrase, args, timeout)
}
@ -47,6 +50,24 @@ func (nsexec *Executor) LuksStatus(volume string, timeout time.Duration) (stdout
return nsexec.Cryptsetup(args, timeout)
}
// IsLuks checks if the device is encrypted with LUKS.
func (nsexec *Executor) IsLuks(devicePath string, timeout time.Duration) (bool, error) {
args := []string{"isLuks", devicePath}
_, err := nsexec.Cryptsetup(args, timeout)
if err == nil {
return true, nil
}
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
if exitErr.ExitCode() == 1 {
// The device is not encrypted if exit code of 1 is returned
// Ref https://gitlab.com/cryptsetup/cryptsetup/-/blob/main/FAQ.md?plain=1#L2848
return false, nil
}
}
return false, err
}
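For reference, the exit-code convention relied on above (cryptsetup isLuks exits with 1 when the device is not LUKS-formatted) can be reproduced outside the namespace executor with plain os/exec; the device path below is only a placeholder.
package main
import (
	"errors"
	"fmt"
	"os/exec"
)
func isLuks(devicePath string) (bool, error) {
	// cryptsetup isLuks exits with 0 for a LUKS device and 1 otherwise
	err := exec.Command("cryptsetup", "isLuks", devicePath).Run()
	if err == nil {
		return true, nil
	}
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
		return false, nil
	}
	// cryptsetup missing or some other failure
	return false, err
}
func main() {
	ok, err := isLuks("/dev/longhorn/example") // placeholder path
	fmt.Println(ok, err)
}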
// Cryptsetup runs cryptsetup without passphrase. It will return
// 0 on success and a non-zero value on error.
func (nsexec *Executor) Cryptsetup(args []string, timeout time.Duration) (stdout string, err error) {

View File

@ -10,6 +10,31 @@ import (
"github.com/longhorn/go-common-libs/types"
)
// GetArch switches to the host namespace and retrieves the system architecture.
func GetArch() (string, error) {
var err error
defer func() {
err = errors.Wrap(err, "failed to get system architecture")
}()
fn := func() (interface{}, error) {
return sys.GetArch()
}
rawResult, err := RunFunc(fn, 0)
if err != nil {
return "", err
}
var result string
var ableToCast bool
result, ableToCast = rawResult.(string)
if !ableToCast {
return "", errors.Errorf(types.ErrNamespaceCastResultFmt, result, rawResult)
}
return result, nil
}
// GetKernelRelease switches to the host namespace and retrieves the kernel release.
func GetKernelRelease() (string, error) {
var err error

View File

@ -19,7 +19,31 @@ import (
commonio "github.com/longhorn/go-common-libs/io"
)
// GetKernelRelease returns the kernel release string.
// GetArch retrieves the system architecture by calling the unix.Uname function
// and extracting the architecture information from the Utsname structure.
// It returns the architecture as a string and an error if the operation fails.
func GetArch() (string, error) {
utsname := &unix.Utsname{}
if err := unix.Uname(utsname); err != nil {
logrus.WithError(err).Warn("Failed to get system architecture")
return "", err
}
// Extract the architecture from the Utsname structure
arch := make([]byte, 0, len(utsname.Machine))
for _, b := range utsname.Machine {
if b == 0x00 {
logrus.Trace("Found end of architecture string [0x00]")
break
}
arch = append(arch, byte(b))
}
return string(arch), nil
}
// GetKernelRelease retrieves the kernel release by calling the unix.Uname function
// and extracting the release information from the Utsname structure.
// It returns the kernel release as a string and an error if the operation fails.
func GetKernelRelease() (string, error) {
utsname := &unix.Utsname{}
if err := unix.Uname(utsname); err != nil {
@ -172,12 +196,20 @@ func GetProcKernelConfigMap(procDir string) (configMap map[string]string, err er
if err != nil {
return nil, err
}
defer configFile.Close()
defer func() {
if errClose := configFile.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close config file %s", configPath)
}
}()
gzReader, err := gzip.NewReader(configFile)
if err != nil {
return nil, err
}
defer gzReader.Close()
defer func() {
if errClose := gzReader.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close gzip reader for config file %s", configPath)
}
}()
return parseKernelModuleConfigMap(gzReader)
}

View File

@ -72,7 +72,7 @@ func RandomID(randomIDLenth int) string {
randomIDLenth = types.RandomIDDefaultLength
}
uuid := strings.Replace(UUID(), "-", "", -1)
uuid := strings.ReplaceAll(UUID(), "-", "")
if len(uuid) > randomIDLenth {
uuid = uuid[:randomIDLenth]

View File

@ -11,6 +11,7 @@ import (
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
lhexec "github.com/longhorn/go-common-libs/exec"
lhtypes "github.com/longhorn/go-common-libs/types"
@ -212,7 +213,11 @@ func StartDaemon(debug bool) error {
}
func startDaemon(logf *os.File, debug bool) {
defer logf.Close()
defer func() {
if errClose := logf.Close(); errClose != nil {
logrus.WithError(errClose).Error("Failed to close log file")
}
}()
opts := []string{
"-f",
@ -226,13 +231,13 @@ func startDaemon(logf *os.File, debug bool) {
cmd.Stderr = mw
if err := cmd.Run(); err != nil {
if CheckTargetForBackingStore("rdwr") {
fmt.Fprintf(mw, "go-iscsi-helper: tgtd is already running\n")
_, _ = fmt.Fprintf(mw, "go-iscsi-helper: tgtd is already running\n")
return
}
fmt.Fprintf(mw, "go-iscsi-helper: command failed: %v\n", err)
_, _ = fmt.Fprintf(mw, "go-iscsi-helper: command failed: %v\n", err)
panic(err)
}
fmt.Fprintln(mw, "go-iscsi-helper: done")
_, _ = fmt.Fprintln(mw, "go-iscsi-helper: done")
}
func CheckTargetForBackingStore(name string) bool {

View File

@ -75,7 +75,7 @@ func NewDevice(name, backingFile, bsType, bsOpts string, scsiTimeout, iscsiAbort
}
func Volume2ISCSIName(name string) string {
return strings.Replace(name, "_", ":", -1)
return strings.ReplaceAll(name, "_", ":")
}
func GetTargetName(volumeName string) string {

View File

@ -1,4 +1,4 @@
package nvme
package initiator
import (
"fmt"
@ -14,6 +14,8 @@ import (
commonns "github.com/longhorn/go-common-libs/ns"
commontypes "github.com/longhorn/go-common-libs/types"
"github.com/longhorn/go-spdk-helper/pkg/jsonrpc"
"github.com/longhorn/go-spdk-helper/pkg/spdk/client"
"github.com/longhorn/go-spdk-helper/pkg/types"
"github.com/longhorn/go-spdk-helper/pkg/util"
)
@ -25,6 +27,11 @@ const (
HostProc = "/host/proc"
validateDiskCreationTimeout = 30 // seconds
UnInitializedUblkId = -1
MaxUblkId = 65535
DefaultUblkQueueDepth = 128
DefaultUblkNumberOfQueue = 1
)
const (
@ -35,18 +42,19 @@ const (
waitDeviceInterval = 1 * time.Second
)
type Initiator struct {
Name string
SubsystemNQN string
UUID string
TransportAddress string
TransportServiceID string
var (
idGenerator IDGenerator
isUblkTargetCreated = false
)
Endpoint string
ControllerName string
NamespaceName string
dev *util.LonghornBlockDevice
isUp bool
type Initiator struct {
Name string
Endpoint string
dev *util.LonghornBlockDevice
isUp bool
NVMeTCPInfo *NVMeTCPInfo
UblkInfo *UblkInfo
hostProc string
executor *commonns.Executor
@ -54,16 +62,34 @@ type Initiator struct {
logger logrus.FieldLogger
}
// NewInitiator creates a new NVMe-oF initiator
func NewInitiator(name, subsystemNQN, hostProc string) (*Initiator, error) {
if name == "" {
return nil, fmt.Errorf("empty name for NVMe-oF initiator creation")
}
type NVMeTCPInfo struct {
SubsystemNQN string
UUID string
TransportAddress string
TransportServiceID string
ControllerName string
NamespaceName string
}
if subsystemNQN == "" {
type UblkInfo struct {
BdevName string
UblkID int32
}
// NewInitiator creates a new initiator
func NewInitiator(name, hostProc string, nvmeTCPInfo *NVMeTCPInfo, ublkInfo *UblkInfo) (*Initiator, error) {
if name == "" {
return nil, fmt.Errorf("empty name for initiator creation")
}
if (ublkInfo == nil && nvmeTCPInfo == nil) || (ublkInfo != nil && nvmeTCPInfo != nil) {
return nil, fmt.Errorf("cannot initiator creation because both nvmeTCPInfo and ublkInfo are nil or non-nil: nvmeTCPInfo: %v, ublkInfo: %v", nvmeTCPInfo, ublkInfo)
}
if nvmeTCPInfo != nil && nvmeTCPInfo.SubsystemNQN == "" {
return nil, fmt.Errorf("empty subsystem for NVMe-oF initiator creation")
}
if ublkInfo != nil && ublkInfo.BdevName == "" {
return nil, fmt.Errorf("empty BdevName for ublk initiator creation")
}
// If transportAddress or transportServiceID is empty, the initiator is still valid for stopping
executor, err := util.NewExecutor(commontypes.ProcDirectory)
if err != nil {
@ -71,17 +97,19 @@ func NewInitiator(name, subsystemNQN, hostProc string) (*Initiator, error) {
}
return &Initiator{
Name: name,
SubsystemNQN: subsystemNQN,
Name: name,
Endpoint: util.GetLonghornDevicePath(name),
NVMeTCPInfo: nvmeTCPInfo,
UblkInfo: ublkInfo,
hostProc: hostProc,
executor: executor,
logger: logrus.WithFields(logrus.Fields{
"name": name,
"subsystemNQN": subsystemNQN,
"name": name,
"nvmeTCPInfo": fmt.Sprintf("%+v", nvmeTCPInfo),
"ublkInfo": fmt.Sprintf("%+v", ublkInfo),
}),
}, nil
}
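A rough sketch of the two construction modes introduced by this signature change, written as if inside this package; the volume name, NQN and bdev name are placeholders:
func newInitiatorsExample() error {
	// NVMe-oF frontend: pass NVMeTCPInfo and leave ublkInfo nil
	nvmeInit, err := NewInitiator("vol-1", HostProc,
		&NVMeTCPInfo{SubsystemNQN: "nqn.2023-01.io.longhorn:vol-1"}, nil)
	if err != nil {
		return err
	}
	_ = nvmeInit
	// ublk frontend: pass UblkInfo and leave nvmeTCPInfo nil
	ublkInit, err := NewInitiator("vol-1", HostProc,
		nil, &UblkInfo{BdevName: "vol-1", UblkID: UnInitializedUblkId})
	if err != nil {
		return err
	}
	_ = ublkInit
	return nil
}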
@ -99,8 +127,8 @@ func (i *Initiator) newLock() (*commonns.FileLock, error) {
return lock, nil
}
// DiscoverTarget discovers a target
func (i *Initiator) DiscoverTarget(ip, port string) (string, error) {
// DiscoverNVMeTCPTarget discovers a target
func (i *Initiator) DiscoverNVMeTCPTarget(ip, port string) (string, error) {
if i.hostProc != "" {
lock, err := i.newLock()
if err != nil {
@ -112,8 +140,8 @@ func (i *Initiator) DiscoverTarget(ip, port string) (string, error) {
return DiscoverTarget(ip, port, i.executor)
}
// ConnectTarget connects to a target
func (i *Initiator) ConnectTarget(ip, port, nqn string) (string, error) {
// ConnectNVMeTCPTarget connects to a target
func (i *Initiator) ConnectNVMeTCPTarget(ip, port, nqn string) (string, error) {
if i.hostProc != "" {
lock, err := i.newLock()
if err != nil {
@ -125,8 +153,11 @@ func (i *Initiator) ConnectTarget(ip, port, nqn string) (string, error) {
return ConnectTarget(ip, port, nqn, i.executor)
}
// DisconnectTarget disconnects a target
func (i *Initiator) DisconnectTarget() error {
// DisconnectNVMeTCPTarget disconnects a target
func (i *Initiator) DisconnectNVMeTCPTarget() error {
if i.NVMeTCPInfo == nil {
return fmt.Errorf("failed to DisconnectNVMeTCPTarget because nvmeTCPInfo is nil")
}
if i.hostProc != "" {
lock, err := i.newLock()
if err != nil {
@ -135,11 +166,14 @@ func (i *Initiator) DisconnectTarget() error {
defer lock.Unlock()
}
return DisconnectTarget(i.SubsystemNQN, i.executor)
return DisconnectTarget(i.NVMeTCPInfo.SubsystemNQN, i.executor)
}
// WaitForConnect waits for the NVMe-oF initiator to connect
func (i *Initiator) WaitForConnect(maxRetries int, retryInterval time.Duration) (err error) {
// WaitForNVMeTCPConnect waits for the NVMe-oF initiator to connect
func (i *Initiator) WaitForNVMeTCPConnect(maxRetries int, retryInterval time.Duration) (err error) {
if i.NVMeTCPInfo == nil {
return fmt.Errorf("failed to WaitForNVMeTCPConnect because nvmeTCPInfo is nil")
}
if i.hostProc != "" {
lock, err := i.newLock()
if err != nil {
@ -149,7 +183,7 @@ func (i *Initiator) WaitForConnect(maxRetries int, retryInterval time.Duration)
}
for r := 0; r < maxRetries; r++ {
err = i.loadNVMeDeviceInfoWithoutLock(i.TransportAddress, i.TransportServiceID, i.SubsystemNQN)
err = i.loadNVMeDeviceInfoWithoutLock(i.NVMeTCPInfo.TransportAddress, i.NVMeTCPInfo.TransportServiceID, i.NVMeTCPInfo.SubsystemNQN)
if err == nil {
return nil
}
@ -159,8 +193,11 @@ func (i *Initiator) WaitForConnect(maxRetries int, retryInterval time.Duration)
return err
}
// WaitForDisconnect waits for the NVMe-oF initiator to disconnect
func (i *Initiator) WaitForDisconnect(maxRetries int, retryInterval time.Duration) (err error) {
// WaitForNVMeTCPTargetDisconnect waits for the NVMe-oF initiator to disconnect
func (i *Initiator) WaitForNVMeTCPTargetDisconnect(maxRetries int, retryInterval time.Duration) (err error) {
if i.NVMeTCPInfo == nil {
return fmt.Errorf("failed to WaitForNVMeTCPTargetDisconnect because nvmeTCPInfo is nil")
}
if i.hostProc != "" {
lock, err := i.newLock()
if err != nil {
@ -170,7 +207,7 @@ func (i *Initiator) WaitForDisconnect(maxRetries int, retryInterval time.Duratio
}
for r := 0; r < maxRetries; r++ {
err = i.loadNVMeDeviceInfoWithoutLock(i.TransportAddress, i.TransportServiceID, i.SubsystemNQN)
err = i.loadNVMeDeviceInfoWithoutLock(i.NVMeTCPInfo.TransportAddress, i.NVMeTCPInfo.TransportServiceID, i.NVMeTCPInfo.SubsystemNQN)
if types.ErrorIsValidNvmeDeviceNotFound(err) {
return nil
}
@ -230,33 +267,36 @@ func (i *Initiator) resumeLinearDmDevice() error {
func (i *Initiator) replaceDmDeviceTarget() error {
suspended, err := i.IsSuspended()
if err != nil {
return errors.Wrapf(err, "failed to check if linear dm device is suspended for NVMe-oF initiator %s", i.Name)
return errors.Wrapf(err, "failed to check if linear dm device is suspended for initiator %s", i.Name)
}
if !suspended {
if err := i.suspendLinearDmDevice(true, false); err != nil {
return errors.Wrapf(err, "failed to suspend linear dm device for NVMe-oF initiator %s", i.Name)
return errors.Wrapf(err, "failed to suspend linear dm device for initiator %s", i.Name)
}
}
if err := i.reloadLinearDmDevice(); err != nil {
return errors.Wrapf(err, "failed to reload linear dm device for NVMe-oF initiator %s", i.Name)
return errors.Wrapf(err, "failed to reload linear dm device for initiator %s", i.Name)
}
if err := i.resumeLinearDmDevice(); err != nil {
return errors.Wrapf(err, "failed to resume linear dm device for NVMe-oF initiator %s", i.Name)
return errors.Wrapf(err, "failed to resume linear dm device for initiator %s", i.Name)
}
return nil
}
// Start starts the NVMe-oF initiator with the given transportAddress and transportServiceID
func (i *Initiator) Start(transportAddress, transportServiceID string, dmDeviceAndEndpointCleanupRequired bool) (dmDeviceIsBusy bool, err error) {
// StartNvmeTCPInitiator starts the NVMe-oF initiator with the given transportAddress and transportServiceID
func (i *Initiator) StartNvmeTCPInitiator(transportAddress, transportServiceID string, dmDeviceAndEndpointCleanupRequired bool) (dmDeviceIsBusy bool, err error) {
defer func() {
if err != nil {
err = errors.Wrapf(err, "failed to start NVMe-oF initiator %s", i.Name)
}
}()
if i.NVMeTCPInfo == nil {
return false, fmt.Errorf("nvmeTCPInfo is nil")
}
if transportAddress == "" || transportServiceID == "" {
return false, fmt.Errorf("invalid transportAddress %s and transportServiceID %s for starting initiator %s", transportAddress, transportServiceID, i.Name)
}
@ -276,10 +316,10 @@ func (i *Initiator) Start(transportAddress, transportServiceID string, dmDeviceA
}
// Check if the initiator/NVMe-oF device is already launched and matches the params
err = i.loadNVMeDeviceInfoWithoutLock(i.TransportAddress, i.TransportServiceID, i.SubsystemNQN)
err = i.loadNVMeDeviceInfoWithoutLock(i.NVMeTCPInfo.TransportAddress, i.NVMeTCPInfo.TransportServiceID, i.NVMeTCPInfo.SubsystemNQN)
if err == nil {
if i.TransportAddress == transportAddress && i.TransportServiceID == transportServiceID {
err = i.LoadEndpoint(false)
if i.NVMeTCPInfo.TransportAddress == transportAddress && i.NVMeTCPInfo.TransportServiceID == transportServiceID {
err = i.LoadEndpointForNvmeTcpFrontend(false)
if err == nil {
i.logger.Info("NVMe-oF initiator is already launched with correct params")
return false, nil
@ -291,15 +331,15 @@ func (i *Initiator) Start(transportAddress, transportServiceID string, dmDeviceA
}
i.logger.Info("Stopping NVMe-oF initiator blindly before starting")
dmDeviceIsBusy, err = i.stopWithoutLock(dmDeviceAndEndpointCleanupRequired, false, false)
dmDeviceIsBusy, err = i.stopWithoutLock(nil, dmDeviceAndEndpointCleanupRequired, false, false)
if err != nil {
return dmDeviceIsBusy, errors.Wrapf(err, "failed to stop the mismatching NVMe-oF initiator %s before starting", i.Name)
}
i.logger.Info("Launching NVMe-oF initiator")
i.connectTarget(transportAddress, transportServiceID, maxConnectTargetRetries, retryConnectTargetInterval)
if i.ControllerName == "" {
i.connectNVMeTCPTarget(transportAddress, transportServiceID, maxConnectTargetRetries, retryConnectTargetInterval)
if i.NVMeTCPInfo.ControllerName == "" {
return dmDeviceIsBusy, fmt.Errorf("failed to start NVMe-oF initiator %s within %d * %v sec retries", i.Name, maxConnectTargetRetries, retryConnectTargetInterval.Seconds())
}
@ -326,7 +366,7 @@ func (i *Initiator) Start(transportAddress, transportServiceID string, dmDeviceA
}
} else {
i.logger.Info("Skipping creating linear dm device for NVMe-oF initiator")
i.dev.Export = i.dev.Nvme
i.dev.Export = i.dev.Source
}
i.logger.Infof("Creating endpoint %v", i.Endpoint)
@ -347,9 +387,127 @@ func (i *Initiator) Start(transportAddress, transportServiceID string, dmDeviceA
return dmDeviceIsBusy, nil
}
func (i *Initiator) StartUblkInitiator(spdkClient *client.Client, dmDeviceAndEndpointCleanupRequired bool) (dmDeviceIsBusy bool, err error) {
defer func() {
if err != nil {
err = errors.Wrapf(err, "failed to start ublk initiator %s", i.Name)
}
}()
if i.UblkInfo == nil {
return false, fmt.Errorf("UblkInfo is nil")
}
if i.hostProc != "" {
lock, err := i.newLock()
if err != nil {
return false, err
}
defer lock.Unlock()
}
if !isUblkTargetCreated {
if err := spdkClient.UblkCreateTarget("", true); err != nil {
return false, err
}
isUblkTargetCreated = true
}
ublkDeviceList, err := spdkClient.UblkGetDisks(0)
if err != nil {
return false, err
}
i.logger.Info("Stopping ublk initiator blindly before starting")
for _, ublkDevice := range ublkDeviceList {
if ublkDevice.BdevName == i.UblkInfo.BdevName {
if err := spdkClient.UblkStopDisk(ublkDevice.ID); err != nil && !jsonrpc.IsJSONRPCRespErrorNoSuchDevice(err) {
return false, err
}
}
}
dmDeviceIsBusy, err = i.stopWithoutLock(spdkClient, dmDeviceAndEndpointCleanupRequired, false, false)
if err != nil {
return dmDeviceIsBusy, errors.Wrapf(err, "failed to stop the ublk initiator %s before starting", i.Name)
}
i.logger.Info("Launching ublk initiator")
ublkDeviceList, err = spdkClient.UblkGetDisks(0)
if err != nil {
return false, err
}
availableUblkID, err := idGenerator.GetAvailableID(ublkDeviceList)
if err != nil {
return false, err
}
if err := spdkClient.UblkStartDisk(i.UblkInfo.BdevName, availableUblkID, DefaultUblkQueueDepth, DefaultUblkNumberOfQueue); err != nil {
return false, err
}
i.UblkInfo.UblkID = availableUblkID
i.logger = i.logger.WithFields(logrus.Fields{
"ublkInfo": fmt.Sprintf("%+v", i.UblkInfo),
})
devicePath, err := spdkClient.FindUblkDevicePath(i.UblkInfo.UblkID)
if err != nil {
return false, err
}
dev, err := util.DetectDevice(devicePath, i.executor)
if err != nil {
return dmDeviceIsBusy, errors.Wrapf(err, "cannot find the device for ublk initiator %s at path %s", i.Name, devicePath)
}
i.dev = &util.LonghornBlockDevice{
Source: *dev,
}
if dmDeviceAndEndpointCleanupRequired {
if dmDeviceIsBusy {
// Endpoint is already created, just replace the target device
i.logger.Info("Linear dm device is busy, trying the best to replace the target device for ublk initiator")
if err := i.replaceDmDeviceTarget(); err != nil {
i.logger.WithError(err).Warnf("Failed to replace the target device for ublk initiator")
} else {
i.logger.Info("Successfully replaced the target device for ublk initiator")
dmDeviceIsBusy = false
}
} else {
i.logger.Info("Creating linear dm device for ublk initiator")
if err := i.createLinearDmDevice(); err != nil {
return false, errors.Wrapf(err, "failed to create linear dm device for ublk initiator %s", i.Name)
}
}
} else {
i.logger.Info("Skipping creating linear dm device for ublk initiator")
i.dev.Export = i.dev.Source
}
i.logger.Infof("Creating endpoint %v", i.Endpoint)
exist, err := i.isEndpointExist()
if err != nil {
return dmDeviceIsBusy, errors.Wrapf(err, "failed to check if endpoint %v exists for ublk initiator %s", i.Endpoint, i.Name)
}
if exist {
i.logger.Infof("Skipping endpoint %v creation for ublk initiator", i.Endpoint)
} else {
if err := i.makeEndpoint(); err != nil {
return dmDeviceIsBusy, err
}
}
i.logger.Infof("Launched ublk initiator: %+v", i)
return dmDeviceIsBusy, nil
}
func (i *Initiator) waitAndLoadNVMeDeviceInfoWithoutLock(transportAddress, transportServiceID string) (err error) {
if i.NVMeTCPInfo == nil {
return fmt.Errorf("failed to waitAndLoadNVMeDeviceInfoWithoutLock because nvmeTCPInfo is nil")
}
for r := 0; r < maxWaitDeviceRetries; r++ {
err = i.loadNVMeDeviceInfoWithoutLock(transportAddress, transportServiceID, i.SubsystemNQN)
err = i.loadNVMeDeviceInfoWithoutLock(transportAddress, transportServiceID, i.NVMeTCPInfo.SubsystemNQN)
if err == nil {
break
}
@ -358,7 +516,10 @@ func (i *Initiator) waitAndLoadNVMeDeviceInfoWithoutLock(transportAddress, trans
return err
}
func (i *Initiator) connectTarget(transportAddress, transportServiceID string, maxRetries int, retryInterval time.Duration) {
func (i *Initiator) connectNVMeTCPTarget(transportAddress, transportServiceID string, maxRetries int, retryInterval time.Duration) {
if i.NVMeTCPInfo == nil {
logrus.Warnf("Failed to connectTarget because nvmeTCPInfo is nil")
}
for r := 0; r < maxRetries; r++ {
// Rerunning this API for an already discovered target should be fine
subsystemNQN, err := DiscoverTarget(transportAddress, transportServiceID, i.executor)
@ -375,14 +536,14 @@ func (i *Initiator) connectTarget(transportAddress, transportServiceID string, m
continue
}
i.SubsystemNQN = subsystemNQN
i.ControllerName = controllerName
i.NVMeTCPInfo.SubsystemNQN = subsystemNQN
i.NVMeTCPInfo.ControllerName = controllerName
break
}
}
// Stop stops the NVMe-oF initiator
func (i *Initiator) Stop(dmDeviceAndEndpointCleanupRequired, deferDmDeviceCleanup, returnErrorForBusyDevice bool) (bool, error) {
func (i *Initiator) Stop(spdkClient *client.Client, dmDeviceAndEndpointCleanupRequired, deferDmDeviceCleanup, returnErrorForBusyDevice bool) (bool, error) {
if i.hostProc != "" {
lock, err := i.newLock()
if err != nil {
@ -391,10 +552,10 @@ func (i *Initiator) Stop(dmDeviceAndEndpointCleanupRequired, deferDmDeviceCleanu
defer lock.Unlock()
}
return i.stopWithoutLock(dmDeviceAndEndpointCleanupRequired, deferDmDeviceCleanup, returnErrorForBusyDevice)
return i.stopWithoutLock(spdkClient, dmDeviceAndEndpointCleanupRequired, deferDmDeviceCleanup, returnErrorForBusyDevice)
}
func (i *Initiator) stopWithoutLock(dmDeviceAndEndpointCleanupRequired, deferDmDeviceCleanup, returnErrorForBusyDevice bool) (dmDeviceIsBusy bool, err error) {
func (i *Initiator) stopWithoutLock(spdkClient *client.Client, dmDeviceAndEndpointCleanupRequired, deferDmDeviceCleanup, returnErrorForBusyDevice bool) (dmDeviceIsBusy bool, err error) {
dmDeviceIsBusy = false
if dmDeviceAndEndpointCleanupRequired {
@ -418,37 +579,66 @@ func (i *Initiator) stopWithoutLock(dmDeviceAndEndpointCleanupRequired, deferDmD
}
}
err = DisconnectTarget(i.SubsystemNQN, i.executor)
if err != nil {
return dmDeviceIsBusy, errors.Wrapf(err, "failed to disconnect target for NVMe-oF initiator %s", i.Name)
// stopping NvmeTcp initiator
if i.NVMeTCPInfo != nil {
err = DisconnectTarget(i.NVMeTCPInfo.SubsystemNQN, i.executor)
if err != nil {
return dmDeviceIsBusy, errors.Wrapf(err, "failed to disconnect target for NVMe-oF initiator %s", i.Name)
}
i.NVMeTCPInfo.ControllerName = ""
i.NVMeTCPInfo.NamespaceName = ""
i.NVMeTCPInfo.TransportAddress = ""
i.NVMeTCPInfo.TransportServiceID = ""
return dmDeviceIsBusy, nil
}
i.ControllerName = ""
i.NamespaceName = ""
i.TransportAddress = ""
i.TransportServiceID = ""
// stopping ublk initiator
if spdkClient == nil || i.UblkInfo == nil {
return dmDeviceIsBusy, fmt.Errorf("failed to stop ublk initiator because spdkClient or UblkInfo is nil: spdkClient: %v, UblkInfo: %v", spdkClient, i.UblkInfo)
}
if i.UblkInfo.UblkID == UnInitializedUblkId {
return dmDeviceIsBusy, nil
}
if err := spdkClient.UblkStopDisk(i.UblkInfo.UblkID); err != nil {
if jsonrpc.IsJSONRPCRespErrorNoSuchDevice(err) {
return dmDeviceIsBusy, nil
}
return dmDeviceIsBusy, err
}
return dmDeviceIsBusy, nil
}
// GetControllerName returns the controller name
func (i *Initiator) GetControllerName() string {
return i.ControllerName
if i.NVMeTCPInfo == nil {
return ""
}
return i.NVMeTCPInfo.ControllerName
}
// GetNamespaceName returns the namespace name
func (i *Initiator) GetNamespaceName() string {
return i.NamespaceName
if i.NVMeTCPInfo == nil {
return ""
}
return i.NVMeTCPInfo.NamespaceName
}
// GetTransportAddress returns the transport address
func (i *Initiator) GetTransportAddress() string {
return i.TransportAddress
if i.NVMeTCPInfo == nil {
return ""
}
return i.NVMeTCPInfo.TransportAddress
}
// GetTransportServiceID returns the transport service ID
func (i *Initiator) GetTransportServiceID() string {
return i.TransportServiceID
if i.NVMeTCPInfo == nil {
return ""
}
return i.NVMeTCPInfo.TransportServiceID
}
// GetEndpoint returns the endpoint
@ -473,6 +663,9 @@ func (i *Initiator) LoadNVMeDeviceInfo(transportAddress, transportServiceID, sub
}
func (i *Initiator) loadNVMeDeviceInfoWithoutLock(transportAddress, transportServiceID, subsystemNQN string) error {
if i.NVMeTCPInfo == nil {
return fmt.Errorf("failed to loadNVMeDeviceInfoWithoutLock because nvmeTCPInfo is nil")
}
nvmeDevices, err := GetDevices(transportAddress, transportServiceID, subsystemNQN, i.executor)
if err != nil {
return err
@ -483,35 +676,38 @@ func (i *Initiator) loadNVMeDeviceInfoWithoutLock(transportAddress, transportSer
if len(nvmeDevices[0].Namespaces) != 1 {
return fmt.Errorf("found zero or multiple devices for NVMe-oF initiator %s", i.Name)
}
if i.ControllerName != "" && i.ControllerName != nvmeDevices[0].Controllers[0].Controller {
return fmt.Errorf("found mismatching between the detected controller name %s and the recorded value %s for NVMe-oF initiator %s", nvmeDevices[0].Controllers[0].Controller, i.ControllerName, i.Name)
if i.NVMeTCPInfo.ControllerName != "" && i.NVMeTCPInfo.ControllerName != nvmeDevices[0].Controllers[0].Controller {
return fmt.Errorf("found mismatching between the detected controller name %s and the recorded value %s for NVMe-oF initiator %s", nvmeDevices[0].Controllers[0].Controller, i.NVMeTCPInfo.ControllerName, i.Name)
}
i.ControllerName = nvmeDevices[0].Controllers[0].Controller
i.NamespaceName = nvmeDevices[0].Namespaces[0].NameSpace
i.TransportAddress, i.TransportServiceID = GetIPAndPortFromControllerAddress(nvmeDevices[0].Controllers[0].Address)
i.NVMeTCPInfo.ControllerName = nvmeDevices[0].Controllers[0].Controller
i.NVMeTCPInfo.NamespaceName = nvmeDevices[0].Namespaces[0].NameSpace
i.NVMeTCPInfo.TransportAddress, i.NVMeTCPInfo.TransportServiceID = GetIPAndPortFromControllerAddress(nvmeDevices[0].Controllers[0].Address)
i.logger = i.logger.WithFields(logrus.Fields{
"controllerName": i.ControllerName,
"namespaceName": i.NamespaceName,
"transportAddress": i.TransportAddress,
"transportServiceID": i.TransportServiceID,
"controllerName": i.NVMeTCPInfo.ControllerName,
"namespaceName": i.NVMeTCPInfo.NamespaceName,
"transportAddress": i.NVMeTCPInfo.TransportAddress,
"transportServiceID": i.NVMeTCPInfo.TransportServiceID,
})
devPath := filepath.Join("/dev", i.NamespaceName)
devPath := filepath.Join("/dev", i.NVMeTCPInfo.NamespaceName)
dev, err := util.DetectDevice(devPath, i.executor)
if err != nil {
return errors.Wrapf(err, "cannot find the device for NVMe-oF initiator %s with namespace name %s", i.Name, i.NamespaceName)
return errors.Wrapf(err, "cannot find the device for NVMe-oF initiator %s with namespace name %s", i.Name, i.NVMeTCPInfo.NamespaceName)
}
i.dev = &util.LonghornBlockDevice{
Nvme: *dev,
Source: *dev,
}
return nil
}
func (i *Initiator) isNamespaceExist(devices []string) bool {
if i.NVMeTCPInfo == nil {
return false
}
for _, device := range devices {
if device == i.NamespaceName {
if device == i.NVMeTCPInfo.NamespaceName {
return true
}
}
@ -526,8 +722,11 @@ func (i *Initiator) findDependentDevices(devName string) ([]string, error) {
return depDevices, nil
}
// LoadEndpoint loads the endpoint
func (i *Initiator) LoadEndpoint(dmDeviceIsBusy bool) error {
// LoadEndpointForNvmeTcpFrontend loads the endpoint
func (i *Initiator) LoadEndpointForNvmeTcpFrontend(dmDeviceIsBusy bool) error {
if i.NVMeTCPInfo == nil {
return fmt.Errorf("failed to LoadEndpointForNvmeTcpFrontend because nvmeTCPInfo is nil")
}
dev, err := util.DetectDevice(i.Endpoint, i.executor)
if err != nil {
return err
@ -541,7 +740,7 @@ func (i *Initiator) LoadEndpoint(dmDeviceIsBusy bool) error {
if dmDeviceIsBusy {
i.logger.Debugf("Skipping endpoint %v loading due to device busy", i.Endpoint)
} else {
if i.NamespaceName != "" && !i.isNamespaceExist(depDevices) {
if i.NVMeTCPInfo.NamespaceName != "" && !i.isNamespaceExist(depDevices) {
return fmt.Errorf("detected device %s name mismatching from endpoint %v for NVMe-oF initiator %s", dev.Name, i.Endpoint, i.Name)
}
}
@ -601,7 +800,7 @@ func (i *Initiator) createLinearDmDevice() error {
return fmt.Errorf("found nil device for linear dm device creation")
}
nvmeDevPath := fmt.Sprintf("/dev/%s", i.dev.Nvme.Name)
nvmeDevPath := fmt.Sprintf("/dev/%s", i.dev.Source.Name)
sectors, err := util.GetDeviceSectorSize(nvmeDevPath, i.executor)
if err != nil {
return err
@ -680,7 +879,7 @@ func (i *Initiator) IsSuspended() (bool, error) {
}
func (i *Initiator) reloadLinearDmDevice() error {
devPath := fmt.Sprintf("/dev/%s", i.dev.Nvme.Name)
devPath := fmt.Sprintf("/dev/%s", i.dev.Source.Name)
// Get the size of the device
opts := []string{

View File

@ -1,4 +1,4 @@
package nvme
package initiator
import (
"fmt"

View File

@ -1,4 +1,4 @@
package nvme
package initiator
import (
"encoding/json"

View File

@ -0,0 +1,41 @@
package initiator
import (
"fmt"
spdktypes "github.com/longhorn/go-spdk-helper/pkg/spdk/types"
)
// IDGenerator tracks only the last ID returned (which is never 0).
// Valid range for returned IDs is [1..MaxUblkId].
type IDGenerator struct {
lastCandidate int32
}
// GetAvailableID returns an ID in [1..65535] that is NOT in inUseUblkDeviceList.
// We do a circular search, skipping ID=0 entirely.
// This function is not thread safe. Callers need to provide their own locking mechanism.
func (gen *IDGenerator) GetAvailableID(inUseUblkDeviceList []spdktypes.UblkDevice) (int32, error) {
// Make a set for quick membership checks
inUsedMap := make(map[int32]struct{}, len(inUseUblkDeviceList))
for _, ublkDevice := range inUseUblkDeviceList {
inUsedMap[ublkDevice.ID] = struct{}{}
}
// We'll try up to MaxUblkId times, because 0 is excluded
for i := int32(1); i <= MaxUblkId; i++ {
candidate := (gen.lastCandidate + i) & 0xFFFF // same as % 65536, but faster with bitwise
if candidate == 0 {
continue
}
if _, used := inUsedMap[candidate]; !used {
// Found an available ID
gen.lastCandidate = candidate
return candidate, nil
}
}
// If every ID in [1..MaxUblkId] is used, no ID is available
return 0, fmt.Errorf("no available ID (1..%v are all in use)", MaxUblkId)
}
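A short usage sketch, written as if inside this package and assuming an already connected *client.Client from go-spdk-helper; it mirrors how StartUblkInitiator pairs UblkGetDisks with GetAvailableID:
func pickUblkID(spdkClient *client.Client) (int32, error) {
	var idGen IDGenerator
	// 0 asks SPDK for every ublk device, so the generator sees the full in-use set
	inUse, err := spdkClient.UblkGetDisks(0)
	if err != nil {
		return 0, err
	}
	// The returned ID is in [1..MaxUblkId] and can be passed to UblkStartDisk
	return idGen.GetAvailableID(inUse)
}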

View File

@ -38,7 +38,10 @@ type Client struct {
sem chan interface{}
msgWrapperQueue chan *messageWrapper
respReceiverQueue chan *Response
responseChans map[uint32]chan *Response
// TODO: may need to launch a cleanup mechanism for the entries that have been there for a long time.
responseChans map[uint32]chan *Response
responseChanInfoMap map[uint32]string
}
type messageWrapper struct {
@ -63,10 +66,11 @@ func NewClient(ctx context.Context, conn net.Conn) *Client {
encoder: json.NewEncoder(conn),
decoder: json.NewDecoder(conn),
sem: make(chan interface{}, DefaultConcurrentLimit),
msgWrapperQueue: make(chan *messageWrapper, DefaultConcurrentLimit),
respReceiverQueue: make(chan *Response, DefaultConcurrentLimit),
responseChans: make(map[uint32]chan *Response),
sem: make(chan interface{}, DefaultConcurrentLimit),
msgWrapperQueue: make(chan *messageWrapper, DefaultConcurrentLimit),
respReceiverQueue: make(chan *Response, DefaultConcurrentLimit),
responseChans: make(map[uint32]chan *Response),
responseChanInfoMap: make(map[uint32]string),
}
c.encoder.SetIndent("", "\t")
@ -166,6 +170,7 @@ func (c *Client) handleSend(msgWrapper *messageWrapper) {
c.idCounter++
c.responseChans[id] = msgWrapper.responseChan
c.responseChanInfoMap[id] = fmt.Sprintf("method: %s, params: %+v", msgWrapper.method, msgWrapper.params)
}
func (c *Client) handleRecv(resp *Response) {
@ -174,12 +179,14 @@ func (c *Client) handleRecv(resp *Response) {
logrus.Warnf("Cannot find the response channel during handleRecv, will discard response: %+v", resp)
return
}
info := c.responseChanInfoMap[resp.ID]
delete(c.responseChans, resp.ID)
delete(c.responseChanInfoMap, resp.ID)
select {
case ch <- resp:
default:
logrus.Errorf("Response receiver queue is full when sending response id %v", resp.ID)
logrus.Errorf("The caller is no longer waiting for the response %+v, %v", resp.Result, info)
}
close(ch)
}
@ -223,14 +230,12 @@ func (c *Client) read() {
continue
}
if !queueTimer.Stop() {
<-queueTimer.C
}
queueTimer.Stop()
queueTimer.Reset(DefaultQueueBlockingTimeout)
select {
case c.respReceiverQueue <- &resp:
case <-queueTimer.C:
logrus.Errorf("Response receiver queue is blocked for over %v second when sending response id %v", DefaultQueueBlockingTimeout, resp.ID)
logrus.Errorf("Response receiver queue is blocked for over %v second when sending response: %+v", DefaultQueueBlockingTimeout, resp)
}
}
}
@ -259,13 +264,13 @@ func (c *Client) SendMsgAsyncWithTimeout(method string, params interface{}, time
select {
case <-c.ctx.Done():
return nil, fmt.Errorf("context done during async message send")
return nil, fmt.Errorf("context done during async message send, method %s, params %+v", method, params)
case c.sem <- nil:
defer func() {
<-c.sem
}()
case <-timer.C:
return nil, fmt.Errorf("timeout %v getting semaphores during async message send", timeout)
return nil, fmt.Errorf("timeout %v getting semaphores during async message send, method %s, params %+v", timeout, method, params)
}
marshaledParams, err := json.Marshal(params)
@ -285,21 +290,21 @@ func (c *Client) SendMsgAsyncWithTimeout(method string, params interface{}, time
select {
case <-c.ctx.Done():
return nil, fmt.Errorf("context done during async message send")
return nil, fmt.Errorf("context done during async message send, method %s, params %+v", method, params)
case c.msgWrapperQueue <- msgWrapper:
case <-timer.C:
return nil, fmt.Errorf("timeout %v queueing message during async message send", timeout)
return nil, fmt.Errorf("timeout %v queueing message during async message send, method %s, params %+v", timeout, method, params)
}
select {
case <-c.ctx.Done():
return nil, fmt.Errorf("context done during async message send")
return nil, fmt.Errorf("context done during async message send, method %s, params %+v", method, params)
case resp = <-responseChan:
if resp == nil {
return nil, fmt.Errorf("received nil response during async message send, maybe the response channel somehow is closed")
return nil, fmt.Errorf("received nil response during async message send, maybe the response channel somehow is closed, method %s, params %+v", method, params)
}
case <-timer.C:
return nil, fmt.Errorf("timeout %v waiting for response during async message send", timeout)
return nil, fmt.Errorf("timeout %v waiting for response during async message send, method %s, params %+v", timeout, method, params)
}
if resp.ErrorInfo != nil {

View File

@ -27,8 +27,9 @@ type RespErrorMsg string
type RespErrorCode int32
const (
RespErrorCodeNoFileExists = -17
RespErrorCodeNoSuchDevice = -19
RespErrorCodeNoSuchProcess = -3
RespErrorCodeNoFileExists = -17
RespErrorCodeNoSuchDevice = -19
)
type Response struct {
@ -55,10 +56,23 @@ type JSONClientError struct {
}
func (re JSONClientError) Error() string {
return fmt.Sprintf("error sending message, id %d, method %s, params %v: %v",
return fmt.Sprintf("error sending message, id %d, method %s, params %+v: %v",
re.ID, re.Method, re.Params, re.ErrorDetail)
}
func IsJSONRPCRespErrorNoSuchProcess(err error) bool {
jsonRPCError, ok := err.(JSONClientError)
if !ok {
return false
}
responseError, ok := jsonRPCError.ErrorDetail.(*ResponseError)
if !ok {
return false
}
return responseError.Code == RespErrorCodeNoSuchProcess
}
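These helpers are typically used to treat "already gone" RPC responses as success so teardown stays idempotent; a sketch of that caller-side pattern, assuming the go-spdk-helper client and jsonrpc packages are imported and the ublk ID is an example:
func stopUblkDiskIfPresent(spdkClient *client.Client, ublkID int32) error {
	// Ignore "no such device" when tearing down, propagate anything else;
	// IsJSONRPCRespErrorNoSuchProcess follows the same pattern for process-level RPCs.
	if err := spdkClient.UblkStopDisk(ublkID); err != nil && !jsonrpc.IsJSONRPCRespErrorNoSuchDevice(err) {
		return err
	}
	return nil
}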
func IsJSONRPCRespErrorNoSuchDevice(err error) bool {
jsonRPCError, ok := err.(JSONClientError)
if !ok {

View File

@ -2,6 +2,7 @@ package client
import (
"encoding/json"
"fmt"
"strconv"
"strings"
@ -18,6 +19,7 @@ type Xattr struct {
const (
UserCreated = "user_created"
SnapshotTimestamp = "snapshot_timestamp"
SnapshotChecksum = "snapshot_checksum"
)
// BdevGetBdevs get information about block devices (bdevs).
@ -211,6 +213,22 @@ func (c *Client) BdevLvolCreate(lvstoreName, lvstoreUUID, lvolName string, sizeI
return uuid, json.Unmarshal(cmdOutput, &uuid)
}
// BdevLvolSetXattr sets extended attribute of a logical volume.
func (c *Client) BdevLvolSetXattr(name, xattrName string, xattrValue string) (set bool, err error) {
req := spdktypes.BdevLvolSetXattrRequest{
Name: name,
XattrName: xattrName,
XattrValue: xattrValue,
}
cmdOutput, err := c.jsonCli.SendCommand("bdev_lvol_set_xattr", req)
if err != nil {
return false, err
}
return set, json.Unmarshal(cmdOutput, &set)
}
// BdevLvolGetXattr gets the value of an extended attribute of a logical volume.
func (c *Client) BdevLvolGetXattr(name, xattrName string) (value string, err error) {
req := spdktypes.BdevLvolGetXattrRequest{
@ -242,6 +260,23 @@ func (c *Client) BdevLvolDelete(name string) (deleted bool, err error) {
return deleted, json.Unmarshal(cmdOutput, &deleted)
}
// BdevLvolGetByName gets information about a single lvol bdevs with the specified name.
//
// "name": Required. UUID or alias of a logical volume (lvol) bdev.
// The alias of a lvol bdev is <LVSTORE NAME>/<LVOL NAME>, and the name of a lvol bdev is its UUID.
//
// "timeout": Optional. 0 by default, meaning the method returns immediately whether the lvol bdev exists or not.
func (c *Client) BdevLvolGetByName(name string, timeout uint64) (bdevLvol spdktypes.BdevInfo, err error) {
bdevLvolList, err := c.BdevLvolGetWithFilter(name, timeout, func(*spdktypes.BdevInfo) bool { return true })
if err != nil {
return spdktypes.BdevInfo{}, err
}
if len(bdevLvolList) != 1 {
return spdktypes.BdevInfo{}, fmt.Errorf("zero or multiple lvols with name %s found", name)
}
return bdevLvolList[0], nil
}
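A minimal usage sketch, assuming a connected *client.Client and that fmt is imported; the alias is a placeholder:
func lookupLvol(spdkClient *client.Client) error {
	// Alias is "<LVSTORE NAME>/<LVOL NAME>"; timeout 0 returns immediately
	bdevLvol, err := spdkClient.BdevLvolGetByName("lvs-1/lvol-1", 0)
	if err != nil {
		return err
	}
	fmt.Printf("lvol bdev: %+v\n", bdevLvol)
	return nil
}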
// BdevLvolGet gets information about lvol bdevs if a name is not specified.
//
// "name": Optional. UUID or alias of a logical volume (lvol) bdev.
@ -296,6 +331,12 @@ func (c *Client) BdevLvolGetWithFilter(name string, timeout uint64, filter func(
if err == nil {
b.DriverSpecific.Lvol.Xattrs[SnapshotTimestamp] = snapshot_timestamp
}
if b.DriverSpecific.Lvol.Snapshot {
checksum, err := c.BdevLvolGetSnapshotChecksum(b.Name)
if err == nil {
b.DriverSpecific.Lvol.Xattrs[SnapshotChecksum] = checksum
}
}
bdevLvolInfoList = append(bdevLvolInfoList, b)
}
@ -387,6 +428,24 @@ func (c *Client) BdevLvolDecoupleParent(name string) (decoupled bool, err error)
return decoupled, json.Unmarshal(cmdOutput, &decoupled)
}
// BdevLvolDetachParent detaches the parent of a logical volume.
// No new clusters are allocated to the child blob and no data is copied from the parent to the child, so the lvol's data is not modified.
// The parent must be a standard snapshot, not an external snapshot. All dependencies on the parent are removed.
//
// "name": Required. UUID or alias of the logical volume to detach the parent of it. The alias of a lvol is <LVSTORE NAME>/<LVOL NAME>.
func (c *Client) BdevLvolDetachParent(name string) (decoupled bool, err error) {
req := spdktypes.BdevLvolDetachParentRequest{
Name: name,
}
cmdOutput, err := c.jsonCli.SendCommandWithLongTimeout("bdev_lvol_detach_parent", req)
if err != nil {
return false, err
}
return decoupled, json.Unmarshal(cmdOutput, &decoupled)
}
// BdevLvolSetParent sets a snapshot as the parent of a lvol, making the lvol a clone/child of this snapshot.
// The previous parent of the lvol can be another snapshot or an external snapshot, if the lvol is not a clone must be thin-provisioned.
// Lvol and parent snapshot must have the same size and must belong to the same lvol store.
@ -454,7 +513,39 @@ func (c *Client) BdevLvolStartShallowCopy(srcLvolName, dstBdevName string) (oper
return shallowCopy.OperationId, nil
}
// BdevLvolStartRangeShallowCopy start a range shallow copy of lvol over a given bdev.
// For the indexes specified in the array, clusters allocated to the lvol will be written on the bdev,
// for the others an unmap command is sent to the bdev.
// Returns the operation ID needed to check the shallow copy status with BdevLvolCheckShallowCopy.
//
// "srcLvolName": Required. UUID or alias of lvol to create a copy from.
//
// "dstBdevName": Required. Name of the bdev that acts as destination for the copy.
//
// "clusters": Required. Array of clusters indexes to be synchronized with copy or unmap.
func (c *Client) BdevLvolStartRangeShallowCopy(srcLvolName, dstBdevName string, clusters []uint64) (operationId uint32, err error) {
req := spdktypes.BdevLvolRangeShallowCopyRequest{
SrcLvolName: srcLvolName,
DstBdevName: dstBdevName,
Clusters: clusters,
}
cmdOutput, err := c.jsonCli.SendCommand("bdev_lvol_start_range_shallow_copy", req)
if err != nil {
return 0, err
}
shallowCopy := spdktypes.ShallowCopy{}
err = json.Unmarshal(cmdOutput, &shallowCopy)
if err != nil {
return 0, err
}
return shallowCopy.OperationId, nil
}
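The start/check pair is meant to be used together; a hedged polling sketch, assuming a connected *client.Client plus fmt and time, with placeholder bdev names and cluster indexes (completion detection depends on the ShallowCopyStatus fields defined in spdktypes):
func rangeShallowCopyExample(spdkClient *client.Client) error {
	// Sync clusters 0, 2 and 7 from the snapshot onto the destination bdev;
	// every other cluster index receives an unmap on the destination.
	opID, err := spdkClient.BdevLvolStartRangeShallowCopy("lvs-1/snap-1", "dst-bdev", []uint64{0, 2, 7})
	if err != nil {
		return err
	}
	for r := 0; r < 60; r++ {
		status, err := spdkClient.BdevLvolCheckShallowCopy(opID)
		if err != nil {
			return err
		}
		fmt.Printf("shallow copy %d status: %+v\n", opID, status)
		// Stop polling once the reported status says the copy is done.
		time.Sleep(time.Second)
	}
	return nil
}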
// BdevLvolCheckShallowCopy check the status of a shallow copy previously started.
// It can be used to check both BdevLvolStartShallowCopy and BdevLvolStartRangeShallowCopy.
//
// "operationId": Required. Operation ID of the shallow copy to check.
func (c *Client) BdevLvolCheckShallowCopy(operationId uint32) (*spdktypes.ShallowCopyStatus, error) {
@ -521,23 +612,89 @@ func (c *Client) BdevLvolRegisterSnapshotChecksum(name string) (registered bool,
// BdevLvolGetSnapshotChecksum gets snapshot's stored checksum. The checksum must have been previously registered.
//
// "name": Required. UUID or alias of the snapshot. The alias of a snapshot is <LVSTORE NAME>/<SNAPSHOT NAME>.
func (c *Client) BdevLvolGetSnapshotChecksum(name string) (checksum *uint64, err error) {
func (c *Client) BdevLvolGetSnapshotChecksum(name string) (checksum string, err error) {
req := spdktypes.BdevLvolGetSnapshotChecksumRequest{
Name: name,
}
cmdOutput, err := c.jsonCli.SendCommandWithLongTimeout("bdev_lvol_get_snapshot_checksum", req)
if err != nil {
return nil, err
return "", err
}
var snapshotChecksum spdktypes.BdevLvolSnapshotChecksum
err = json.Unmarshal(cmdOutput, &snapshotChecksum)
if err != nil {
return "", err
}
return strconv.FormatUint(snapshotChecksum.Checksum, 10), nil
}
// BdevLvolRegisterRangeChecksums computes and stores a checksum for the whole snapshot and a checksum for each of the snapshot's clusters. Overwrites old checksums if already registered.
//
// "name": Required. UUID or alias of the snapshot. The alias of a snapshot is <LVSTORE NAME>/<SNAPSHOT NAME>.
func (c *Client) BdevLvolRegisterRangeChecksums(name string) (registered bool, err error) {
req := spdktypes.BdevLvolRegisterRangeChecksumsRequest{
Name: name,
}
cmdOutput, err := c.jsonCli.SendCommandWithLongTimeout("bdev_lvol_register_snapshot_range_checksums", req)
if err != nil {
return false, err
}
return registered, json.Unmarshal(cmdOutput, &registered)
}
// BdevLvolGetRangeChecksums gets snapshot's stored checksums for the clusters in the range. The checksums must have been previously registered.
//
// "name": Required. UUID or alias of the snapshot. The alias of a snapshot is <LVSTORE NAME>/<SNAPSHOT NAME>.
//
// "clusterStartIndex": Required. The index of the first cluster in the range.
//
// "clusterCount": Required. The number of clusters in the range.
func (c *Client) BdevLvolGetRangeChecksums(name string, clusterStartIndex, clusterCount uint64) (dataChecksums map[uint64]uint64, err error) {
req := spdktypes.BdevLvolGetRangeChecksumsRequest{
Name: name,
ClusterStartIndex: clusterStartIndex,
ClusterCount: clusterCount,
}
cmdOutput, err := c.jsonCli.SendCommandWithLongTimeout("bdev_lvol_get_snapshot_range_checksums", req)
if err != nil {
return nil, err
}
return &snapshotChecksum.Checksum, nil
var rangeChecksums []spdktypes.BdevLvolRangeChecksum
err = json.Unmarshal(cmdOutput, &rangeChecksums)
if err != nil {
return nil, err
}
dataChecksums = make(map[uint64]uint64)
for _, clusterChecksum := range rangeChecksums {
dataChecksums[clusterChecksum.ClusterIndex] = clusterChecksum.Checksum
}
return dataChecksums, nil
}
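A sketch of registering and then reading back per-cluster checksums, assuming a connected *client.Client and that fmt is imported; the snapshot alias and cluster range are placeholders:
func rangeChecksumExample(spdkClient *client.Client) error {
	// Compute and store the per-cluster checksums of a snapshot, then read back the first 16 of them.
	if _, err := spdkClient.BdevLvolRegisterRangeChecksums("lvs-1/snap-1"); err != nil {
		return err
	}
	checksums, err := spdkClient.BdevLvolGetRangeChecksums("lvs-1/snap-1", 0, 16)
	if err != nil {
		return err
	}
	for clusterIndex, checksum := range checksums {
		fmt.Printf("cluster %d checksum %d\n", clusterIndex, checksum)
	}
	return nil
}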
// BdevLvolStopSnapshotChecksum stops an ongoing registration of a snapshot's checksum.
// It can be used to stop both BdevLvolRegisterSnapshotChecksum and BdevLvolRegisterRangeChecksums.
//
// "name": Required. UUID or alias of the snapshot. The alias of a snapshot is <LVSTORE NAME>/<SNAPSHOT NAME>.
func (c *Client) BdevLvolStopSnapshotChecksum(name string) (registered bool, err error) {
req := spdktypes.BdevLvolStopSnapshotChecksumRequest{
Name: name,
}
cmdOutput, err := c.jsonCli.SendCommand("bdev_lvol_stop_snapshot_checksum", req)
if err != nil {
return false, err
}
return registered, json.Unmarshal(cmdOutput, &registered)
}
// BdevLvolRename renames a logical volume.
@ -1270,3 +1427,63 @@ func (c *Client) BdevVirtioDetachController(name string) (deleted bool, err erro
return deleted, json.Unmarshal(cmdOutput, &deleted)
}
// BdevGetIostat gets I/O statistics of block devices (bdevs).
//
// "name": Optional. If this is not specified, the function will list all block devices.
//
// "per_channel": Optional. Display per channel data for specified block device.
func (c *Client) BdevGetIostat(name string, perChannel bool) (resp *spdktypes.BdevIostatResponse, err error) {
req := spdktypes.BdevIostatRequest{
Name: name,
PerChannel: perChannel,
}
cmdOutput, err := c.jsonCli.SendCommand("bdev_get_iostat", req)
if err != nil {
return nil, err
}
resp = &spdktypes.BdevIostatResponse{}
if err := json.Unmarshal(cmdOutput, resp); err != nil {
return nil, errors.Wrap(err, "failed to parse bdev_get_iostat response")
}
return resp, nil
}
// BdevSetQosLimit sets the quality of service rate limits on a bdev.
//
// "name": Required. Block device name to apply QoS settings to.
//
// "rw_ios_per_sec": Optional. Number of R/W I/Os per second to allow. 0 means unlimited.
//
// "rw_mbytes_per_sec": Optional. Number of R/W megabytes per second to allow. 0 means unlimited.
//
// "r_mbytes_per_sec": Optional. Number of Read megabytes per second to allow. 0 means unlimited.
//
// "w_mbytes_per_sec": Optional. Number of Write megabytes per second to allow. 0 means unlimited.
func (c *Client) BdevSetQosLimit(bdevName string, rwIOsPerSec, rwMBPerSec, rMBPerSec, wMBPerSec int64) error {
params := map[string]interface{}{
"name": bdevName,
"rw_ios_per_sec": rwIOsPerSec,
"rw_mbytes_per_sec": rwMBPerSec,
"r_mbytes_per_sec": rMBPerSec,
"w_mbytes_per_sec": wMBPerSec,
}
resp, err := c.jsonCli.SendCommand("bdev_set_qos_limit", params)
if err != nil {
return errors.Wrap(err, "failed to send bdev_set_qos_limit")
}
var result bool
if err := json.Unmarshal(resp, &result); err != nil {
return errors.Wrapf(err, "invalid response format: %s", string(resp))
}
if !result {
return fmt.Errorf("SPDK returned false for bdev_set_qos_limit")
}
return nil
}
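A small usage sketch relying only on the documented behavior that 0 means unlimited; the bdev name and the chosen limits are placeholders:
// exampleThrottleThenClear caps a bdev at 1000 R/W IOPS and 100 MB/s, then removes
// all limits again by passing zeros (0 means unlimited for every field).
func exampleThrottleThenClear(cli *Client, bdevName string) error {
	if err := cli.BdevSetQosLimit(bdevName, 1000, 100, 0, 0); err != nil {
		return err
	}
	// ... run the workload that should be throttled ...
	return cli.BdevSetQosLimit(bdevName, 0, 0, 0, 0)
}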

View File

@ -0,0 +1,96 @@
package client
import (
"encoding/json"
"fmt"
"github.com/pkg/errors"
spdktypes "github.com/longhorn/go-spdk-helper/pkg/spdk/types"
)
// UblkCreateTarget starts the SPDK ublk target, optionally pinned to the CPU cores in cpumask.
func (c *Client) UblkCreateTarget(cpumask string, disableUserCopy bool) (err error) {
req := spdktypes.UblkCreateTargetRequest{
Cpumask: cpumask,
DisableUserCopy: disableUserCopy,
}
cmdOutput, err := c.jsonCli.SendCommand("ublk_create_target", req)
if err != nil {
return errors.Wrapf(err, "failed to UblkCreateTarget: %v", string(cmdOutput))
}
return nil
}
// UblkDestroyTarget releases all ublk devices and destroys the ublk target.
func (c *Client) UblkDestroyTarget() (err error) {
cmdOutput, err := c.jsonCli.SendCommand("ublk_destroy_target", struct{}{})
if err != nil {
return errors.Wrapf(err, "failed to UblkDestroyTarget: %v", string(cmdOutput))
}
return nil
}
// UblkGetDisks lists all ublk devices, or only the device matching the specified ublk ID.
func (c *Client) UblkGetDisks(ublkID int32) (ublkDeviceList []spdktypes.UblkDevice, err error) {
req := spdktypes.UblkGetDisksRequest{
UblkId: ublkID,
}
cmdOutput, err := c.jsonCli.SendCommand("ublk_get_disks", req)
if err != nil {
return nil, errors.Wrapf(err, "failed to create UblkGetDisks: %v", string(cmdOutput))
}
return ublkDeviceList, json.Unmarshal(cmdOutput, &ublkDeviceList)
}
// UblkStartDisk exposes the bdev bdevName as a ublk block device with the given ublk ID, queue depth, and number of queues.
func (c *Client) UblkStartDisk(bdevName string, ublkId, queueDepth, numQueues int32) (err error) {
req := spdktypes.UblkStartDiskRequest{
BdevName: bdevName,
UblkId: ublkId,
QueueDepth: queueDepth,
NumQueues: numQueues,
}
cmdOutput, err := c.jsonCli.SendCommand("ublk_start_disk", req)
if err != nil {
return errors.Wrapf(err, "failed to UblkStartDisk: %v", string(cmdOutput))
}
return nil
}
// UblkRecoverDisk recovers the existing ublk device with the given ID, reattaching it to bdevName.
func (c *Client) UblkRecoverDisk(bdevName string, ublkId int32) (err error) {
req := spdktypes.UblkRecoverDiskRequest{
BdevName: bdevName,
UblkId: ublkId,
}
cmdOutput, err := c.jsonCli.SendCommand("ublk_recover_disk", req)
if err != nil {
return errors.Wrapf(err, "failed to UblkRecoverDisk: %v", string(cmdOutput))
}
return nil
}
// UblkStopDisk stops and removes the ublk device with the given ID.
func (c *Client) UblkStopDisk(ublkId int32) (err error) {
req := spdktypes.UblkStopDiskRequest{
UblkId: ublkId,
}
cmdOutput, err := c.jsonCli.SendCommand("ublk_stop_disk", req)
if err != nil {
return errors.Wrapf(err, "failed to UblkStopDisk: %v", string(cmdOutput))
}
return nil
}
// FindUblkDevicePath returns the device path of the ublk device with the given ID, or an empty string if no such device exists.
func (c *Client) FindUblkDevicePath(ublkID int32) (string, error) {
ublkDeviceList, err := c.UblkGetDisks(ublkID)
if err != nil {
return "", err
}
devicePath := ""
for _, ublkDevice := range ublkDeviceList {
if ublkDevice.ID == ublkID {
if devicePath != "" {
return "", fmt.Errorf("found multiple ublk device with the id %v", ublkID)
}
devicePath = ublkDevice.UblkDevice
}
}
return devicePath, nil
}
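A minimal end-to-end sketch of exposing a bdev through ublk with the calls above, assuming a connected *Client and that no ublk target exists yet; the cpumask, ublk ID, and queue settings are placeholders:
// exampleExposeBdevViaUblk creates the ublk target, starts a ublk disk backed by
// bdevName, and returns the resulting device path (typically /dev/ublkb<ID>).
func exampleExposeBdevViaUblk(cli *Client, bdevName string, ublkID int32) (string, error) {
	if err := cli.UblkCreateTarget("0x1", false); err != nil {
		return "", err
	}
	if err := cli.UblkStartDisk(bdevName, ublkID, 16, 1); err != nil {
		return "", err
	}
	return cli.FindUblkDevicePath(ublkID)
}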

View File

@ -135,3 +135,30 @@ type BdevLvolFragmap struct {
NumAllocatedClusters uint64 `json:"num_allocated_clusters"`
Fragmap string `json:"fragmap"`
}
type BdevIostatRequest struct {
Name string `json:"name,omitempty"`
PerChannel bool `json:"per_channel,omitempty"`
}
type BdevIostatResponse struct {
TickRate uint64 `json:"tick_rate"`
Ticks uint64 `json:"ticks"`
Bdevs []BdevStats `json:"bdevs"`
}
type BdevStats struct {
Name string `json:"name"`
BytesRead uint64 `json:"bytes_read"`
NumReadOps uint64 `json:"num_read_ops"`
BytesWritten uint64 `json:"bytes_written"`
NumWriteOps uint64 `json:"num_write_ops"`
BytesUnmapped uint64 `json:"bytes_unmapped"`
NumUnmapOps uint64 `json:"num_unmap_ops"`
ReadLatencyTicks uint64 `json:"read_latency_ticks"`
WriteLatencyTicks uint64 `json:"write_latency_ticks"`
UnmapLatencyTicks uint64 `json:"unmap_latency_ticks"`
QueueDepth uint64 `json:"queue_depth"`
IoTime uint64 `json:"io_time"`
WeightedIoTime uint64 `json:"weighted_io_time"`
}
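A small sketch of how two successive samples of the same bdev's BdevStats can be turned into a read throughput figure, assuming the byte counters are cumulative; the sampling interval is supplied by the caller and the time import is assumed:
// exampleReadThroughputMBps converts the delta between two cumulative samples of the
// same bdev into a read throughput in MB/s over the given sampling interval.
func exampleReadThroughputMBps(prev, curr BdevStats, interval time.Duration) float64 {
	if interval <= 0 || curr.BytesRead < prev.BytesRead {
		// No elapsed time, or the counters were reset; no meaningful rate.
		return 0
	}
	deltaBytes := float64(curr.BytesRead - prev.BytesRead)
	return deltaBytes / interval.Seconds() / (1024 * 1024)
}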

View File

@ -47,10 +47,11 @@ type ShallowCopy struct {
}
type ShallowCopyStatus struct {
State string `json:"state"`
CopiedClusters uint64 `json:"copied_clusters"`
TotalClusters uint64 `json:"total_clusters"`
Error string `json:"error,omitempty"`
State string `json:"state"`
CopiedClusters uint64 `json:"copied_clusters"`
UnmappedClusters uint64 `json:"unmapped_clusters,omitempty"`
TotalClusters uint64 `json:"total_clusters"`
Error string `json:"error,omitempty"`
}
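A hedged sketch of polling a shallow-copy status until it finishes, with getStatus standing in for however the caller obtains a ShallowCopyStatus; the state strings mirror the ShallowCopyStateComplete and ShallowCopyStateError constants shown further down in this diff, and the fmt and time imports are assumed:
// exampleWaitForShallowCopy polls getStatus until the shallow copy leaves the
// "in progress" state, then reports success or the recorded error.
func exampleWaitForShallowCopy(getStatus func() (*ShallowCopyStatus, error)) error {
	for {
		status, err := getStatus()
		if err != nil {
			return err
		}
		switch status.State {
		case "complete":
			return nil
		case "error":
			return fmt.Errorf("shallow copy failed: %s", status.Error)
		}
		// Still in progress; poll again shortly.
		time.Sleep(time.Second)
	}
}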
type BdevLvolCreateLvstoreRequest struct {
@ -72,6 +73,12 @@ type BdevLvolRenameLvstoreRequest struct {
NewName string `json:"new_name"`
}
type BdevLvolSetXattrRequest struct {
Name string `json:"name"`
XattrName string `json:"xattr_name"`
XattrValue string `json:"xattr_value"`
}
type BdevLvolGetXattrRequest struct {
Name string `json:"name"`
XattrName string `json:"xattr_name"`
@ -124,6 +131,10 @@ type BdevLvolDecoupleParentRequest struct {
Name string `json:"name"`
}
type BdevLvolDetachParentRequest struct {
Name string `json:"name"`
}
type BdevLvolSetParentRequest struct {
LvolName string `json:"lvol_name"`
ParentName string `json:"parent_name"`
@ -139,6 +150,12 @@ type BdevLvolShallowCopyRequest struct {
DstBdevName string `json:"dst_bdev_name"`
}
type BdevLvolRangeShallowCopyRequest struct {
SrcLvolName string `json:"src_lvol_name"`
DstBdevName string `json:"dst_bdev_name"`
Clusters []uint64 `json:"clusters"`
}
type BdevLvolGetFragmapRequest struct {
Name string `json:"name"`
Offset uint64 `json:"offset"`
@ -154,14 +171,33 @@ type BdevLvolRegisterSnapshotChecksumRequest struct {
Name string `json:"name"`
}
type BdevLvolRegisterRangeChecksumsRequest struct {
Name string `json:"name"`
}
type BdevLvolGetSnapshotChecksumRequest struct {
Name string `json:"name"`
}
type BdevLvolGetRangeChecksumsRequest struct {
Name string `json:"name"`
ClusterStartIndex uint64 `json:"cluster_start_index"`
ClusterCount uint64 `json:"cluster_count"`
}
type BdevLvolSnapshotChecksum struct {
Checksum uint64 `json:"checksum"`
}
type BdevLvolRangeChecksum struct {
ClusterIndex uint64 `json:"cluster_index"`
Checksum uint64 `json:"checksum"`
}
type BdevLvolStopSnapshotChecksumRequest struct {
Name string `json:"name"`
}
func GetLvolAlias(lvsName, lvolName string) string {
return fmt.Sprintf("%s/%s", lvsName, lvolName)
}

View File

@ -0,0 +1,34 @@
package types
type UblkCreateTargetRequest struct {
Cpumask string `json:"cpumask,omitempty"`
DisableUserCopy bool `json:"disable_user_copy"`
}
type UblkGetDisksRequest struct {
UblkId int32 `json:"ublk_id"`
}
type UblkDevice struct {
BdevName string `json:"bdev_name"`
ID int32 `json:"id"`
NumQueues int32 `json:"num_queues"`
QueueDepth int32 `json:"queue_depth"`
UblkDevice string `json:"ublk_device"`
}
type UblkStartDiskRequest struct {
BdevName string `json:"bdev_name"`
UblkId int32 `json:"ublk_id"`
QueueDepth int32 `json:"queue_depth"`
NumQueues int32 `json:"num_queues"`
}
type UblkRecoverDiskRequest struct {
BdevName string `json:"bdev_name"`
UblkId int32 `json:"ublk_id"`
}
type UblkStopDiskRequest struct {
UblkId int32 `json:"ublk_id"`
}

View File

@ -18,12 +18,13 @@ const (
FrontendSPDKTCPNvmf = "spdk-tcp-nvmf"
FrontendSPDKTCPBlockdev = "spdk-tcp-blockdev"
FrontendSPDKUblk = "ublk"
ShallowCopyStateInProgress = "in progress"
ShallowCopyStateComplete = "complete"
ShallowCopyStateError = "error"
ExecuteTimeout = 60 * time.Second
ExecuteTimeout = 180 * time.Second
)
const (

View File

@ -36,7 +36,7 @@ type BlockDevices struct {
}
type LonghornBlockDevice struct {
Nvme BlockDevice
Source BlockDevice
Export BlockDevice
}
@ -81,14 +81,14 @@ func GetKnownDevices(executor *commonns.Executor) (map[string]*LonghornBlockDevi
f := strings.Fields(line)
if len(f) == 2 {
dev := &LonghornBlockDevice{
Nvme: BlockDevice{
Source: BlockDevice{
Name: f[0],
},
}
if _, err := fmt.Sscanf(f[1], "%d:%d", &dev.Nvme.Major, &dev.Nvme.Minor); err != nil {
return nil, fmt.Errorf("invalid major:minor %s for NVMe device %s", dev.Nvme.Name, f[1])
if _, err := fmt.Sscanf(f[1], "%d:%d", &dev.Source.Major, &dev.Source.Minor); err != nil {
return nil, fmt.Errorf("invalid major:minor %s for NVMe device %s", dev.Source.Name, f[1])
}
knownDevices[dev.Nvme.Name] = dev
knownDevices[dev.Source.Name] = dev
}
}
@ -219,11 +219,11 @@ func DuplicateDevice(dev *LonghornBlockDevice, dest string) error {
dir := filepath.Dir(dest)
if _, err := os.Stat(dir); os.IsNotExist(err) {
if err := os.MkdirAll(dir, 0755); err != nil {
logrus.WithError(err).Fatalf("device %v: Failed to create directory for %v", dev.Nvme.Name, dest)
logrus.WithError(err).Fatalf("device %v: Failed to create directory for %v", dev.Source.Name, dest)
}
}
if err := mknod(dest, dev.Export.Major, dev.Export.Minor); err != nil {
return errors.Wrapf(err, "cannot create device node %s for device %s", dest, dev.Nvme.Name)
return errors.Wrapf(err, "cannot create device node %s for device %s", dest, dev.Source.Name)
}
if err := os.Chmod(dest, 0660); err != nil {
return errors.Wrapf(err, "cannot change permission of the device %s", dest)

View File

@ -4,14 +4,18 @@ import (
"fmt"
"time"
"github.com/longhorn/types/pkg/generated/enginerpc"
"github.com/sirupsen/logrus"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/protobuf/types/known/emptypb"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"github.com/longhorn/types/pkg/generated/enginerpc"
"github.com/longhorn/longhorn-engine/pkg/interceptor"
"github.com/longhorn/longhorn-engine/pkg/meta"
"github.com/longhorn/longhorn-engine/pkg/types"
@ -416,7 +420,12 @@ func (c *ControllerClient) Check() error {
if err != nil {
return errors.Wrapf(err, "cannot connect to ControllerService %v", c.serviceURL)
}
defer conn.Close()
defer func() {
if errClose := conn.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close controller client for %v", c.serviceURL)
}
}()
// TODO: JM we can reuse the controller service context connection for the health requests
healthCheckClient := healthpb.NewHealthClient(conn)

View File

@ -61,7 +61,7 @@ func (q *Qcow) ReadAt(buf []byte, off int64) (int, error) {
}
func (q *Qcow) UnmapAt(length uint32, off int64) (int, error) {
return 0, errors.New("Unsupported operation")
return 0, errors.New("unsupported operation")
}
func (q *Qcow) Close() error {

View File

@ -168,6 +168,7 @@ func GetReplicaInfo(r *enginerpc.Replica) *types.ReplicaInfo {
RevisionCounterDisabled: r.RevisionCounterDisabled,
UnmapMarkDiskChainRemoved: r.UnmapMarkDiskChainRemoved,
SnapshotCountUsage: int(r.SnapshotCountUsage),
SnapshotCountTotal: int(r.SnapshotCountTotal),
SnapshotSizeUsage: r.SnapshotSizeUsage,
}

View File

@ -268,7 +268,11 @@ func GetSnapshotHashInfoFromChecksumFile(snapshotName string) (*xattrType.Snapsh
if err != nil {
return nil, err
}
defer f.Close()
defer func() {
if errClose := f.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close checksum file %v", path)
}
}()
var info xattrType.SnapshotHashInfo
@ -315,7 +319,11 @@ func encodeToFile(obj interface{}, path string) (err error) {
if err != nil {
return err
}
defer f.Close()
defer func() {
if errClose := f.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close checksum file %v", tmpPath)
}
}()
if err := json.NewEncoder(f).Encode(&obj); err != nil {
return err
@ -391,7 +399,11 @@ func hashSnapshot(ctx context.Context, snapshotName string) (string, error) {
if err != nil {
return "", errors.Wrapf(err, "failed to open %v", path)
}
defer f.Close()
defer func() {
if errClose := f.Close(); errClose != nil {
logrus.WithError(errClose).Warnf("Failed to close file %v", path)
}
}()
h, err := newHashMethod(defaultHashMethod)
if err != nil {

View File

@ -29,8 +29,6 @@ import (
const (
volumeMetaData = "volume.meta"
maximumChainLength = 250
tmpFileSuffix = ".tmp"
// Special indexes inside r.volume.files
@ -151,7 +149,7 @@ func New(ctx context.Context, size, sectorSize int64, dir string, backingFile *b
func NewReadOnly(ctx context.Context, dir, head string, backingFile *backingfile.BackingFile) (*Replica, error) {
// size and sectorSize don't matter because they will be read from metadata
// snapshotMaxCount and SnapshotMaxSize don't matter because readonly replica can't create a new disk
return construct(ctx, true, 0, diskutil.ReplicaSectorSize, dir, head, backingFile, false, false, 250, 0)
return construct(ctx, true, 0, diskutil.ReplicaSectorSize, dir, head, backingFile, false, false, types.MaximumTotalSnapshotCount, 0)
}
func construct(ctx context.Context, readonly bool, size, sectorSize int64, dir, head string, backingFile *backingfile.BackingFile, disableRevCounter, unmapMarkDiskChainRemoved bool, snapshotMaxCount int, snapshotMaxSize int64) (*Replica, error) {
@ -265,7 +263,11 @@ func (r *Replica) isExtentSupported() error {
return err
}
defer file.Close()
defer func() {
if errClose := file.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close file %v", filePath)
}
}()
fiemapFile := fibmap.NewFibmapFile(file)
if _, errno := fiemapFile.Fiemap(uint32(fileInfo.Size())); errno != 0 {
@ -634,7 +636,9 @@ func (r *Replica) isBackingFile(index int) bool {
func (r *Replica) closeWithoutWritingMetaData() {
for i, f := range r.volume.files {
if f != nil && !r.isBackingFile(i) {
f.Close()
if errClose := f.Close(); errClose != nil {
logrus.WithError(errClose).Error("Failed to close file")
}
}
}
}
@ -726,7 +730,9 @@ func (r *Replica) createNewHead(oldHead, parent, created string, size int64) (f
// the upper layer either succeeds to execute all functions,
// or fails in the middle then does rollback for the previous succeeded parts so that everything looks like unchanged.
rollbackFunc = func() error {
f.Close()
if errClose := f.Close(); errClose != nil {
logrus.WithError(errClose).Error("Failed to close file")
}
if subRollbackFunc != nil {
return types.CombineErrors(subRollbackFunc(), r.rmDisk(newHeadName))
}
@ -867,7 +873,11 @@ func (r *Replica) revertDisk(parentDiskFileName, created string) (*Replica, erro
if err != nil {
return nil, err
}
defer f.Close()
defer func() {
if errClose := f.Close(); errClose != nil {
logrus.WithError(errClose).Error("Failed to close file")
}
}()
info := r.info
info.Head = newHeadDisk.Name
@ -1050,7 +1060,7 @@ func (r *Replica) openLiveChain() error {
return err
}
if len(chain) > maximumChainLength {
if len(chain) > types.MaximumTotalSnapshotCount {
return fmt.Errorf("live chain is too long: %v", len(chain))
}
@ -1161,7 +1171,11 @@ func (r *Replica) unmarshalFile(file string, obj interface{}) error {
if err != nil {
return err
}
defer f.Close()
defer func() {
if errClose := f.Close(); errClose != nil {
logrus.WithError(errClose).Errorf("Failed to close file %v", p)
}
}()
dec := json.NewDecoder(f)
return dec.Decode(obj)
@ -1398,11 +1412,15 @@ func (r *Replica) ListDisks() map[string]DiskInfo {
return result
}
func (r *Replica) GetSnapshotCountUsage() int {
func (r *Replica) GetSnapshotCount() (int, int) {
r.RLock()
defer r.RUnlock()
return r.getSnapshotCountUsage()
return r.getSnapshotCountUsage(), r.getSnapshotCountTotal()
}
func (r *Replica) getSnapshotCountTotal() int {
return len(r.diskData)
}
func (r *Replica) getSnapshotCountUsage() int {

View File

@ -101,7 +101,9 @@ func (s *Server) Reload() error {
oldReplica := s.r
s.r = newReplica
oldReplica.Close()
if errClose := oldReplica.Close(); errClose != nil {
logrus.WithError(errClose).Error("Failed to close old replica")
}
return nil
}

Some files were not shown because too many files have changed in this diff.