Compare commits

..

20 Commits

Author SHA1 Message Date
Wang Yan af4843729c
build base images for v2.13.0 (#22013)
Signed-off-by: wang yan <wangyan@vmware.com>
2025-05-20 05:47:24 +00:00
Wang Yan 647842f419
[cherry-pick] update storage to s3 (#21999) (#22002)
update storage to s3 (#21999)

move the build storage from google storage to the CNCF S3 storage

Currently, we use the internal GCR to store all dev builds for nightly testing, development, and as candidates for RC and GA releases. However, this internal Google storage will no longer be available, so this pull request moves the builds to the CNCF-hosted S3 storage.

Signed-off-by: wang yan <wangyan@vmware.com>
2025-05-19 10:56:10 +08:00
Daniel Jiang b8ee76373f
Pin trivy adapter to the GA version v0.33.1 (#21997)
Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2025-05-15 06:40:04 +00:00
Daniel Jiang 9a932a0f9a
Bump up Trivy adapter for v2.13.x (#21984)
Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2025-05-09 09:16:20 +00:00
Prasanth Baskar abc5e55ea9
[cherry-pick] Fix: Helm Chart Copy Button in UI (#21983)
Fix: Helm Chart Copy Button in UI (#21969)

* fix: helm chart copy btn in UI



* add: tests for pull command component in UI



---------

Signed-off-by: bupd <bupdprasanth@gmail.com>
2025-05-09 06:28:03 +00:00
Wang Yan 785b60bcbe
bump dependencies of golang (#21978)
Signed-off-by: wang yan <wangyan@vmware.com>
2025-05-08 09:48:10 +00:00
Wang Yan 6d3bf31364
build base images for v2.13.1 (#21976)
Signed-off-by: wang yan <wangyan@vmware.com>
2025-05-08 16:05:44 +08:00
Wang Yan 4170738322
upgrade the build machine to ubuntu 22 (#21975)
Per https://github.com/actions/runner-images/issues/11101, Ubuntu 20.04 is out of support. This change updates the GitHub Actions build machine to 22.04.

Signed-off-by: wang yan <wangyan@vmware.com>
2025-05-08 14:57:53 +08:00
Wang Yan 028b393112
bump base version for v2.13.1 (#21965)
Signed-off-by: wang yan <wangyan@vmware.com>
2025-05-06 17:15:06 +08:00
Wang Yan c130e3d539
[cherry-pick] unify the golang image version (#21936)
unify the golang image version

Make the golang version as a unified parameter to build all harbor components

Signed-off-by: wang yan <wangyan@vmware.com>
2025-04-27 07:11:13 +00:00
Wang Yan a37118f518
[cherry-pick]revise make file for lint api (#21932)
revise make file for lint api

Decouple the lint from the api generation step in the makefile.

Signed-off-by: wang yan <wangyan@vmware.com>
2025-04-25 19:23:01 +08:00
Wang Yan 40e78d5545
[cherry-pick] enhance the query judgement (#21927)
enhance the query judgement

the query parameter cannot contain orm.ExerSep, which consists of key characters used by orm.
this pull request enhances the validation for query parameters.

Signed-off-by: wang yan <wangyan@vmware.com>
2025-04-24 19:16:43 +08:00
Wang Yan 05d5b64ff9
update trivy scan branch (#21918)
Signed-off-by: wang yan <wangyan@vmware.com>
2025-04-24 15:02:08 +08:00
stonezdj(Daojun Zhang) f019430872
[cherry-pick] Update artifact info (#21907)
update artifact info

Signed-off-by: stonezdj <stone.zhang@broadcom.com>
2025-04-22 13:13:38 +08:00
miner b7e00b2e53
[cherry-pick]fix jobservice container loglevel consistent with job_log (#21875)
Signed-off-by: yminer <miner.yang@broadcom.com>
2025-04-15 14:07:29 +08:00
Wang Yan db1569ae20
build base for v2.13 (#21852)
Signed-off-by: wang yan <wangyan@vmware.com>
2025-04-09 11:07:48 +08:00
Chlins Zhang 69c62ef41a
[CHERRY-PICK] fix: support preheat cnai model artifact (#21851)
fix: support preheat cnai model artifact

Signed-off-by: chlins <chlins.zhang@gmail.com>
2025-04-08 12:20:42 +00:00
Prasanth Baskar d569ba20d6
[cherry-pick] Update dependencies in Harbor UI (#21848)
Update dependencies in Harbor UI (#21823)

* deps: update src/portal/app-swagger-ui



* deps: update swagger-ui



* deps: update src/portal



---------

Signed-off-by: bupd <bupdprasanth@gmail.com>
2025-04-08 16:12:26 +08:00
stonezdj(Daojun Zhang) f5f912a780
[cherry-pick] Update robot testcase related to security hub row count to 15 by default (#21847)
Update robot testcase related to security hub row count to 15 by default

Signed-off-by: stonezdj <stone.zhang@broadcom.com>
2025-04-08 15:17:55 +08:00
Wang Yan 15f3aabc0d
build base images for v2.13 (#21820)
Signed-off-by: wang yan <wangyan@vmware.com>
2025-04-03 13:50:41 +08:00
676 changed files with 2541 additions and 5064 deletions

View File

@ -8,6 +8,18 @@
* Add date here... Add signature here... * Add date here... Add signature here...
- Add your reason here... - Add your reason here...
* May 20 2025 <yan-yw.wang@broadcom.com>
- Refresh base image
* May 08 2025 <yan-yw.wang@broadcom.com>
- Refresh base image
* Apr 08 2025 <yan-yw.wang@broadcom.com>
- Refresh base image
* Apr 03 2025 <yan-yw.wang@broadcom.com>
- Refresh base image
* Oct 24 2024 <yan-yw.wang@broadcom.com> * Oct 24 2024 <yan-yw.wang@broadcom.com>
- Refresh base image - Refresh base image

View File

@ -91,7 +91,7 @@ jobs:
- name: Codecov For BackEnd - name: Codecov For BackEnd
uses: codecov/codecov-action@v5 uses: codecov/codecov-action@v5
with: with:
files: ./src/github.com/goharbor/harbor/profile.cov file: ./src/github.com/goharbor/harbor/profile.cov
flags: unittests flags: unittests
APITEST_DB: APITEST_DB:
@ -333,5 +333,5 @@ jobs:
- name: Codecov For UI - name: Codecov For UI
uses: codecov/codecov-action@v5 uses: codecov/codecov-action@v5
with: with:
files: ./src/github.com/goharbor/harbor/src/portal/coverage/lcov.info file: ./src/github.com/goharbor/harbor/src/portal/coverage/lcov.info
flags: unittests flags: unittests

View File

@ -16,7 +16,7 @@ jobs:
- ubuntu-22.04 - ubuntu-22.04
steps: steps:
- name: Configure AWS credentials - name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4.2.1 uses: aws-actions/configure-aws-credentials@v4.1.0
with: with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@ -87,8 +87,8 @@ jobs:
else else
build_base_params=" BUILD_BASE=true PUSHBASEIMAGE=true REGISTRYUSER=\"${{ secrets.DOCKER_HUB_USERNAME }}\" REGISTRYPASSWORD=\"${{ secrets.DOCKER_HUB_PASSWORD }}\"" build_base_params=" BUILD_BASE=true PUSHBASEIMAGE=true REGISTRYUSER=\"${{ secrets.DOCKER_HUB_USERNAME }}\" REGISTRYPASSWORD=\"${{ secrets.DOCKER_HUB_PASSWORD }}\""
fi fi
sudo make package_offline GOBUILDTAGS="include_oss include_gcs" BASEIMAGETAG=${Harbor_Build_Base_Tag} VERSIONTAG=${Harbor_Assets_Version} PKGVERSIONTAG=${Harbor_Package_Version} TRIVYFLAG=true EXPORTERFLAG=true HTTPPROXY= ${build_base_params} sudo make package_offline GOBUILDTAGS="include_oss include_gcs" BASEIMAGETAG=${Harbor_Build_Base_Tag} VERSIONTAG=${Harbor_Assets_Version} PKGVERSIONTAG=${Harbor_Package_Version} TRIVYFLAG=true HTTPPROXY= ${build_base_params}
sudo make package_online GOBUILDTAGS="include_oss include_gcs" BASEIMAGETAG=${Harbor_Build_Base_Tag} VERSIONTAG=${Harbor_Assets_Version} PKGVERSIONTAG=${Harbor_Package_Version} TRIVYFLAG=true EXPORTERFLAG=true HTTPPROXY= ${build_base_params} sudo make package_online GOBUILDTAGS="include_oss include_gcs" BASEIMAGETAG=${Harbor_Build_Base_Tag} VERSIONTAG=${Harbor_Assets_Version} PKGVERSIONTAG=${Harbor_Package_Version} TRIVYFLAG=true HTTPPROXY= ${build_base_params}
harbor_offline_build_bundle=$(basename harbor-offline-installer-*.tgz) harbor_offline_build_bundle=$(basename harbor-offline-installer-*.tgz)
harbor_online_build_bundle=$(basename harbor-online-installer-*.tgz) harbor_online_build_bundle=$(basename harbor-online-installer-*.tgz)
echo "Package name is: $harbor_offline_build_bundle" echo "Package name is: $harbor_offline_build_bundle"

View File

@ -18,7 +18,7 @@ jobs:
- ubuntu-latest - ubuntu-latest
steps: steps:
- name: Configure AWS credentials - name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4.2.1 uses: aws-actions/configure-aws-credentials@v4.1.0
with: with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View File

@ -12,7 +12,7 @@ jobs:
matrix: matrix:
# maintain the versions of harbor that need to be actively # maintain the versions of harbor that need to be actively
# security scanned # security scanned
versions: [dev, v2.12.0-dev] versions: [dev, v2.13.0-dev]
# list of images that need to be scanned # list of images that need to be scanned
images: [harbor-core, harbor-db, harbor-exporter, harbor-jobservice, harbor-log, harbor-portal, harbor-registryctl, prepare] images: [harbor-core, harbor-db, harbor-exporter, harbor-jobservice, harbor-log, harbor-portal, harbor-registryctl, prepare]
permissions: permissions:

View File

@ -9,9 +9,6 @@ on:
- '!tests/**.sh' - '!tests/**.sh'
- '!tests/apitests/**' - '!tests/apitests/**'
- '!tests/ci/**' - '!tests/ci/**'
- '!tests/resources/**'
- '!tests/robot-cases/**'
- '!tests/robot-cases/Group1-Nightly/**'
push: push:
paths: paths:
- 'docs/**' - 'docs/**'
@ -20,9 +17,6 @@ on:
- '!tests/**.sh' - '!tests/**.sh'
- '!tests/apitests/**' - '!tests/apitests/**'
- '!tests/ci/**' - '!tests/ci/**'
- '!tests/resources/**'
- '!tests/robot-cases/**'
- '!tests/robot-cases/Group1-Nightly/**'
jobs: jobs:
UTTEST: UTTEST:

View File

@ -20,7 +20,7 @@ jobs:
echo "BRANCH=$(echo $release | jq -r '.target_commitish')" >> $GITHUB_ENV echo "BRANCH=$(echo $release | jq -r '.target_commitish')" >> $GITHUB_ENV
echo "PRERELEASE=$(echo $release | jq -r '.prerelease')" >> $GITHUB_ENV echo "PRERELEASE=$(echo $release | jq -r '.prerelease')" >> $GITHUB_ENV
- name: Configure AWS credentials - name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4.2.1 uses: aws-actions/configure-aws-credentials@v4.1.0
with: with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View File

@ -31,10 +31,10 @@ API explorer integration. End users can now explore and trigger Harbors API v
* Support Image Retag, enables the user to tag image to different repositories and projects, this is particularly useful in cases when images need to be retagged programmatically in a CI pipeline. * Support Image Retag, enables the user to tag image to different repositories and projects, this is particularly useful in cases when images need to be retagged programmatically in a CI pipeline.
* Support Image Build History, makes it easy to see the contents of a container image, refer to the [User Guide](https://github.com/goharbor/harbor/blob/release-1.7.0/docs/user_guide.md#build-history). * Support Image Build History, makes it easy to see the contents of a container image, refer to the [User Guide](https://github.com/goharbor/harbor/blob/release-1.7.0/docs/user_guide.md#build-history).
* Support Logger customization, enables the user to customize STDOUT / STDERR / FILE / DB logger of running jobs. * Support Logger customization, enables the user to customize STDOUT / STDERR / FILE / DB logger of running jobs.
* Improve the user experience of Helm Chart Repository: * Improve user experience of Helm Chart Repository:
- Chart searching is included in the global search results - Chart searching included in the global search results
- Show the total number of chart versions in the chart list - Show chart versions total number in the chart list
- Mark labels in helm charts - Mark labels to helm charts
- The latest version can be downloaded as default one on the chart list view - The latest version can be downloaded as default one on the chart list view
- The chart can be deleted by deleting all the versions under it - The chart can be deleted by deleting all the versions under it
@ -58,7 +58,7 @@ API explorer integration. End users can now explore and trigger Harbors API v
- Replication policy rework to support wildcard, scheduled replication. - Replication policy rework to support wildcard, scheduled replication.
- Support repository level description. - Support repository level description.
- Batch operation on projects/repositories/users from UI. - Batch operation on projects/repositories/users from UI.
- On board LDAP user when adding a member to a project. - On board LDAP user when adding member to a project.
## v1.3.0 (2018-01-04) ## v1.3.0 (2018-01-04)
@ -75,11 +75,11 @@ API explorer integration. End users can now explore and trigger Harbors API v
## v1.1.0 (2017-04-18) ## v1.1.0 (2017-04-18)
- Add in Notary support - Add in Notary support
- User can update the configuration through Harbor UI - User can update configuration through Harbor UI
- Redesign of Harbor's UI using Clarity - Redesign of Harbor's UI using Clarity
- Some changes to API - Some changes to API
- Fix some security issues in the token service - Fix some security issues in token service
- Upgrade the base image of nginx to the latest openssl version - Upgrade base image of nginx for latest openssl version
- Various bug fixes. - Various bug fixes.
## v0.5.0 (2016-12-6) ## v0.5.0 (2016-12-6)
@ -88,7 +88,7 @@ API explorer integration. End users can now explore and trigger Harbors API v
- Easier configuration for HTTPS in prepare script - Easier configuration for HTTPS in prepare script
- Script to collect logs of a Harbor deployment - Script to collect logs of a Harbor deployment
- User can view the storage usage (default location) of Harbor. - User can view the storage usage (default location) of Harbor.
- Add an attribute to disable normal users from creating projects. - Add an attribute to disable normal user to create project
- Various bug fixes. - Various bug fixes.
For Harbor virtual appliance: For Harbor virtual appliance:

View File

@ -14,7 +14,7 @@ Contributors are encouraged to collaborate using the following resources in addi
* Chat with us on the CNCF Slack ([get an invitation here][cncf-slack] ) * Chat with us on the CNCF Slack ([get an invitation here][cncf-slack] )
* [#harbor][users-slack] for end-user discussions * [#harbor][users-slack] for end-user discussions
* [#harbor-dev][dev-slack] for development of Harbor * [#harbor-dev][dev-slack] for development of Harbor
* Want long-form communication instead of Slack? We have two distribution lists: * Want long-form communication instead of Slack? We have two distributions lists:
* [harbor-users][users-dl] for end-user discussions * [harbor-users][users-dl] for end-user discussions
* [harbor-dev][dev-dl] for development of Harbor * [harbor-dev][dev-dl] for development of Harbor
@ -49,7 +49,7 @@ To build the project, please refer the [build](https://goharbor.io/docs/edge/bui
### Repository Structure ### Repository Structure
Here is the basic structure of the Harbor code base. Some key folders / files are commented for your reference. Here is the basic structure of the harbor code base. Some key folders / files are commented for your references.
``` ```
. .
... ...
@ -168,14 +168,13 @@ Harbor backend is written in [Go](http://golang.org/). If you don't have a Harbo
| 2.11 | 1.22.3 | | 2.11 | 1.22.3 |
| 2.12 | 1.23.2 | | 2.12 | 1.23.2 |
| 2.13 | 1.23.8 | | 2.13 | 1.23.8 |
| 2.14 | 1.24.5 |
Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions. Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.
#### Web #### Web
Harbor web UI is built based on [Clarity](https://vmware.github.io/clarity/) and [Angular](https://angular.io/) web framework. To setup a web UI development environment, please make sure that the [npm](https://www.npmjs.com/get-npm) tool is installed first. Harbor web UI is built based on [Clarity](https://vmware.github.io/clarity/) and [Angular](https://angular.io/) web framework. To setup web UI development environment, please make sure the [npm](https://www.npmjs.com/get-npm) tool is installed first.
| Harbor | Requires Angular | Requires Clarity | | Harbor | Requires Angular | Requires Clarity |
|----------|--------------------|--------------------| |----------|--------------------|--------------------|
@ -205,7 +204,7 @@ PR are always welcome, even if they only contain small fixes like typos or a few
Please submit a PR broken down into small changes bit by bit. A PR consisting of a lot of features and code changes may be hard to review. It is recommended to submit PRs in an incremental fashion. Please submit a PR broken down into small changes bit by bit. A PR consisting of a lot of features and code changes may be hard to review. It is recommended to submit PRs in an incremental fashion.
Note: If you split your pull request to small changes, please make sure any of the changes goes to `main` will not break anything. Otherwise, it can not be merged until this feature completed. Note: If you split your pull request to small changes, please make sure any of the changes goes to `main` will not break anything. Otherwise, it can not be merged until this feature complete.
### Fork and clone ### Fork and clone
@ -279,7 +278,7 @@ To build the code, please refer to [build](https://goharbor.io/docs/edge/build-c
**Note**: from v2.0, Harbor uses [go-swagger](https://github.com/go-swagger/go-swagger) to generate API server from Swagger 2.0 (aka [OpenAPI 2.0](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md)). To add or change the APIs, first update the `api/v2.0/swagger.yaml` file, then run `make gen_apis` to generate the API server, finally, implement or update the API handlers in `src/server/v2.0/handler` package. **Note**: from v2.0, Harbor uses [go-swagger](https://github.com/go-swagger/go-swagger) to generate API server from Swagger 2.0 (aka [OpenAPI 2.0](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md)). To add or change the APIs, first update the `api/v2.0/swagger.yaml` file, then run `make gen_apis` to generate the API server, finally, implement or update the API handlers in `src/server/v2.0/handler` package.
As Harbor now uses `controller/manager/dao` programming model, we suggest using [testify mock](https://github.com/stretchr/testify/blob/master/mock/doc.go) to test `controller` and `manager`. Harbor integrates [mockery](https://github.com/vektra/mockery) to generate mocks for golang interfaces using the testify mock package. To generate mocks for the interface, first add mock config in the `src/.mockery.yaml`, then run `make gen_mocks` to generate mocks. As now Harbor uses `controller/manager/dao` programming model, we suggest to use [testify mock](https://github.com/stretchr/testify/blob/master/mock/doc.go) to test `controller` and `manager`. Harbor integrates [mockery](https://github.com/vektra/mockery) to generate mocks for golang interfaces using the testify mock package. To generate mocks for the interface, first add mock config in the `src/.mockery.yaml`, then run `make gen_mocks` to generate mocks.
### Keep sync with upstream ### Keep sync with upstream
@ -318,15 +317,15 @@ curl https://cdn.jsdelivr.net/gh/tommarshall/git-good-commit@v0.6.1/hook.sh > .g
``` ```
### Automated Testing ### Automated Testing
Once your pull request has been opened, Harbor will run two CI pipelines against it. Once your pull request has been opened, harbor will run two CI pipelines against it.
1. In the travis CI, your source code will be checked via `golint`, `go vet` and `go race` that makes sure the code is readable, safe and correct. Also, all of unit tests will be triggered via `go test` against the pull request. What you need to pay attention to is the travis result and the coverage report. 1. In the travis CI, your source code will be checked via `golint`, `go vet` and `go race` that makes sure the code is readable, safe and correct. Also, all of unit tests will be triggered via `go test` against the pull request. What you need to pay attention to is the travis result and the coverage report.
* If any failure in travis, you need to figure out whether it is introduced by your commits. * If any failure in travis, you need to figure out whether it is introduced by your commits.
* If the coverage dramatically declines, then you need to commit a unit test to cover your code. * If the coverage dramatic decline, you need to commit unit test to coverage your code.
2. In the drone CI, the E2E test will be triggered against the pull request. Also, the source code will be checked via `gosec`, and the result is stored in google storage for later analysis. The pipeline is about to build and install harbor from source code, then to run four very basic E2E tests to validate the basic functionalities of Harbor, like: 2. In the drone CI, the E2E test will be triggered against the pull request. Also, the source code will be checked via `gosec`, and the result is stored in google storage for later analysis. The pipeline is about to build and install harbor from source code, then to run four very basic E2E tests to validate the basic functionalities of harbor, like:
* Registry Basic Verification, to validate that the image can be pulled and pushed successfully. * Registry Basic Verification, to validate the image can be pulled and pushed successful.
* Trivy Basic Verification, to validate that the image can be scanned successfully. * Trivy Basic Verification, to validate the image can be scanned successful.
* Notary Basic Verification, to validate that the image can be signed successfully. * Notary Basic Verification, to validate the image can be signed successful.
* Ldap Basic Verification, to validate that Harbor can work in LDAP environment. * Ldap Basic Verification, to validate harbor can work in LDAP environment.
### Push and Create PR ### Push and Create PR
When ready for review, push your branch to your fork repository on `github.com`: When ready for review, push your branch to your fork repository on `github.com`:
@ -345,7 +344,7 @@ Commit changes made in response to review comments to the same branch on your fo
It is a great way to contribute to Harbor by reporting an issue. Well-written and complete bug reports are always welcome! Please open an issue on GitHub and follow the template to fill in required information. It is a great way to contribute to Harbor by reporting an issue. Well-written and complete bug reports are always welcome! Please open an issue on GitHub and follow the template to fill in required information.
Before opening any issue, please look up the existing [issues](https://github.com/goharbor/harbor/issues) to avoid submitting a duplicate. Before opening any issue, please look up the existing [issues](https://github.com/goharbor/harbor/issues) to avoid submitting a duplication.
If you find a match, you can "subscribe" to it to get notified on updates. If you have additional helpful information about the issue, please leave a comment. If you find a match, you can "subscribe" to it to get notified on updates. If you have additional helpful information about the issue, please leave a comment.
When reporting issues, always include: When reporting issues, always include:

View File

@ -78,7 +78,6 @@ REGISTRYSERVER=
REGISTRYPROJECTNAME=goharbor REGISTRYPROJECTNAME=goharbor
DEVFLAG=true DEVFLAG=true
TRIVYFLAG=false TRIVYFLAG=false
EXPORTERFLAG=false
HTTPPROXY= HTTPPROXY=
BUILDREG=true BUILDREG=true
BUILDTRIVYADP=true BUILDTRIVYADP=true
@ -93,12 +92,7 @@ VERSIONTAG=dev
BUILD_BASE=true BUILD_BASE=true
PUSHBASEIMAGE=false PUSHBASEIMAGE=false
BASEIMAGETAG=dev BASEIMAGETAG=dev
# for skip build prepare and log container while BUILD_INSTALLER=false BUILDBASETARGET=trivy-adapter core db jobservice log nginx portal prepare redis registry registryctl exporter
BUILD_INSTALLER=true
BUILDBASETARGET=trivy-adapter core db jobservice nginx portal redis registry registryctl exporter
ifeq ($(BUILD_INSTALLER), true)
BUILDBASETARGET += prepare log
endif
IMAGENAMESPACE=goharbor IMAGENAMESPACE=goharbor
BASEIMAGENAMESPACE=goharbor BASEIMAGENAMESPACE=goharbor
# #input true/false only # #input true/false only
@ -111,8 +105,8 @@ PREPARE_VERSION_NAME=versions
#versions #versions
REGISTRYVERSION=v2.8.3-patch-redis REGISTRYVERSION=v2.8.3-patch-redis
TRIVYVERSION=v0.61.0 TRIVYVERSION=v0.62.1
TRIVYADAPTERVERSION=v0.33.0-rc.2 TRIVYADAPTERVERSION=v0.33.1
NODEBUILDIMAGE=node:16.18.0 NODEBUILDIMAGE=node:16.18.0
# version of registry for pulling the source code # version of registry for pulling the source code
@ -135,7 +129,6 @@ endef
# docker parameters # docker parameters
DOCKERCMD=$(shell which docker) DOCKERCMD=$(shell which docker)
DOCKERBUILD=$(DOCKERCMD) build DOCKERBUILD=$(DOCKERCMD) build
DOCKERNETWORK=default
DOCKERRMIMAGE=$(DOCKERCMD) rmi DOCKERRMIMAGE=$(DOCKERCMD) rmi
DOCKERPULL=$(DOCKERCMD) pull DOCKERPULL=$(DOCKERCMD) pull
DOCKERIMAGES=$(DOCKERCMD) images DOCKERIMAGES=$(DOCKERCMD) images
@ -151,7 +144,7 @@ GOINSTALL=$(GOCMD) install
GOTEST=$(GOCMD) test GOTEST=$(GOCMD) test
GODEP=$(GOTEST) -i GODEP=$(GOTEST) -i
GOFMT=gofmt -w GOFMT=gofmt -w
GOBUILDIMAGE=golang:1.24.5 GOBUILDIMAGE=golang:1.23.8
GOBUILDPATHINCONTAINER=/harbor GOBUILDPATHINCONTAINER=/harbor
# go build # go build
@ -245,27 +238,18 @@ REGISTRYUSER=
REGISTRYPASSWORD= REGISTRYPASSWORD=
# cmds # cmds
DOCKERSAVE_PARA=$(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \ DOCKERSAVE_PARA=$(DOCKER_IMAGE_NAME_PREPARE):$(VERSIONTAG) \
$(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
$(DOCKERIMAGENAME_CORE):$(VERSIONTAG) \ $(DOCKERIMAGENAME_CORE):$(VERSIONTAG) \
$(DOCKERIMAGENAME_LOG):$(VERSIONTAG) \
$(DOCKERIMAGENAME_DB):$(VERSIONTAG) \ $(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) \ $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) \
$(DOCKERIMAGENAME_REGCTL):$(VERSIONTAG) \ $(DOCKERIMAGENAME_REGCTL):$(VERSIONTAG) \
$(DOCKERIMAGENAME_EXPORTER):$(VERSIONTAG) \
$(IMAGENAMESPACE)/redis-photon:$(VERSIONTAG) \ $(IMAGENAMESPACE)/redis-photon:$(VERSIONTAG) \
$(IMAGENAMESPACE)/nginx-photon:$(VERSIONTAG) \ $(IMAGENAMESPACE)/nginx-photon:$(VERSIONTAG) \
$(IMAGENAMESPACE)/registry-photon:$(VERSIONTAG) $(IMAGENAMESPACE)/registry-photon:$(VERSIONTAG)
ifeq ($(BUILD_INSTALLER), true)
DOCKERSAVE_PARA+= $(DOCKER_IMAGE_NAME_PREPARE):$(VERSIONTAG) \
$(DOCKERIMAGENAME_LOG):$(VERSIONTAG)
endif
ifeq ($(TRIVYFLAG), true)
DOCKERSAVE_PARA+= $(IMAGENAMESPACE)/trivy-adapter-photon:$(VERSIONTAG)
endif
ifeq ($(EXPORTERFLAG), true)
DOCKERSAVE_PARA+= $(DOCKERIMAGENAME_EXPORTER):$(VERSIONTAG)
endif
PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(PKGVERSIONTAG).tgz \ PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(PKGVERSIONTAG).tgz \
$(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \ $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \
$(HARBORPKG)/prepare \ $(HARBORPKG)/prepare \
@ -282,6 +266,11 @@ PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
DOCKERCOMPOSE_FILE_OPT=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME) DOCKERCOMPOSE_FILE_OPT=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
ifeq ($(TRIVYFLAG), true)
DOCKERSAVE_PARA+= $(IMAGENAMESPACE)/trivy-adapter-photon:$(VERSIONTAG)
endif
RUNCONTAINER=$(DOCKERCMD) run --rm -u $(shell id -u):$(shell id -g) -v $(BUILDPATH):$(BUILDPATH) -w $(BUILDPATH) RUNCONTAINER=$(DOCKERCMD) run --rm -u $(shell id -u):$(shell id -g) -v $(BUILDPATH):$(BUILDPATH) -w $(BUILDPATH)
# $1 the name of the docker image # $1 the name of the docker image
@ -325,7 +314,7 @@ gen_apis:
MOCKERY_IMAGENAME=$(IMAGENAMESPACE)/mockery MOCKERY_IMAGENAME=$(IMAGENAMESPACE)/mockery
MOCKERY_VERSION=v2.53.3 MOCKERY_VERSION=v2.51.0
MOCKERY=$(RUNCONTAINER)/src ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION} MOCKERY=$(RUNCONTAINER)/src ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION}
MOCKERY_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/mockery/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg MOCKERY_VERSION=${MOCKERY_VERSION} -t ${MOCKERY_IMAGENAME}:$(MOCKERY_VERSION) . MOCKERY_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/mockery/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg MOCKERY_VERSION=${MOCKERY_VERSION} -t ${MOCKERY_IMAGENAME}:$(MOCKERY_VERSION) .
@ -404,9 +393,7 @@ build:
-e REGISTRYVERSION=$(REGISTRYVERSION) -e REGISTRY_SRC_TAG=$(REGISTRY_SRC_TAG) -e DISTRIBUTION_SRC=$(DISTRIBUTION_SRC)\ -e REGISTRYVERSION=$(REGISTRYVERSION) -e REGISTRY_SRC_TAG=$(REGISTRY_SRC_TAG) -e DISTRIBUTION_SRC=$(DISTRIBUTION_SRC)\
-e TRIVYVERSION=$(TRIVYVERSION) -e TRIVYADAPTERVERSION=$(TRIVYADAPTERVERSION) \ -e TRIVYVERSION=$(TRIVYVERSION) -e TRIVYADAPTERVERSION=$(TRIVYADAPTERVERSION) \
-e VERSIONTAG=$(VERSIONTAG) \ -e VERSIONTAG=$(VERSIONTAG) \
-e DOCKERNETWORK=$(DOCKERNETWORK) \
-e BUILDREG=$(BUILDREG) -e BUILDTRIVYADP=$(BUILDTRIVYADP) \ -e BUILDREG=$(BUILDREG) -e BUILDTRIVYADP=$(BUILDTRIVYADP) \
-e BUILD_INSTALLER=$(BUILD_INSTALLER) \
-e NPM_REGISTRY=$(NPM_REGISTRY) -e BASEIMAGETAG=$(BASEIMAGETAG) -e IMAGENAMESPACE=$(IMAGENAMESPACE) -e BASEIMAGENAMESPACE=$(BASEIMAGENAMESPACE) \ -e NPM_REGISTRY=$(NPM_REGISTRY) -e BASEIMAGETAG=$(BASEIMAGETAG) -e IMAGENAMESPACE=$(IMAGENAMESPACE) -e BASEIMAGENAMESPACE=$(BASEIMAGENAMESPACE) \
-e REGISTRYURL=$(REGISTRYURL) \ -e REGISTRYURL=$(REGISTRYURL) \
-e TRIVY_DOWNLOAD_URL=$(TRIVY_DOWNLOAD_URL) -e TRIVY_ADAPTER_DOWNLOAD_URL=$(TRIVY_ADAPTER_DOWNLOAD_URL) \ -e TRIVY_DOWNLOAD_URL=$(TRIVY_DOWNLOAD_URL) -e TRIVY_ADAPTER_DOWNLOAD_URL=$(TRIVY_ADAPTER_DOWNLOAD_URL) \
@ -453,14 +440,7 @@ package_online: update_prepare_version
@rm -rf $(HARBORPKG) @rm -rf $(HARBORPKG)
@echo "Done." @echo "Done."
.PHONY: check_buildinstaller package_offline: update_prepare_version compile build
check_buildinstaller:
@if [ "$(BUILD_INSTALLER)" != "true" ]; then \
echo "Must set BUILD_INSTALLER as true while triggering package_offline build" ; \
exit 1; \
fi
package_offline: check_buildinstaller update_prepare_version compile build
@echo "packing offline package ..." @echo "packing offline package ..."
@cp -r make $(HARBORPKG) @cp -r make $(HARBORPKG)
@ -491,7 +471,7 @@ misspell:
@find . -type d \( -path ./tests \) -prune -o -name '*.go' -print | xargs misspell -error @find . -type d \( -path ./tests \) -prune -o -name '*.go' -print | xargs misspell -error
# golangci-lint binary installation or refer to https://golangci-lint.run/usage/install/#local-installation # golangci-lint binary installation or refer to https://golangci-lint.run/usage/install/#local-installation
# curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.2 # curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2
GOLANGCI_LINT := $(shell go env GOPATH)/bin/golangci-lint GOLANGCI_LINT := $(shell go env GOPATH)/bin/golangci-lint
lint: lint:
@echo checking lint @echo checking lint
@ -559,7 +539,7 @@ swagger_client:
rm -rf harborclient rm -rf harborclient
mkdir -p harborclient/harbor_v2_swagger_client mkdir -p harborclient/harbor_v2_swagger_client
java -jar openapi-generator-cli.jar generate -i api/v2.0/swagger.yaml -g python -o harborclient/harbor_v2_swagger_client --package-name v2_swagger_client java -jar openapi-generator-cli.jar generate -i api/v2.0/swagger.yaml -g python -o harborclient/harbor_v2_swagger_client --package-name v2_swagger_client
cd harborclient/harbor_v2_swagger_client; pip install . cd harborclient/harbor_v2_swagger_client; python ./setup.py install
pip install docker -q pip install docker -q
pip freeze pip freeze

View File

@ -9,7 +9,6 @@
[![Nightly Status](https://us-central1-eminent-nation-87317.cloudfunctions.net/harbor-nightly-result)](https://www.googleapis.com/storage/v1/b/harbor-nightly/o) [![Nightly Status](https://us-central1-eminent-nation-87317.cloudfunctions.net/harbor-nightly-result)](https://www.googleapis.com/storage/v1/b/harbor-nightly/o)
![CONFORMANCE_TEST](https://github.com/goharbor/harbor/workflows/CONFORMANCE_TEST/badge.svg) ![CONFORMANCE_TEST](https://github.com/goharbor/harbor/workflows/CONFORMANCE_TEST/badge.svg)
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor?ref=badge_shield) [![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor?ref=badge_shield)
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/harbor)](https://artifacthub.io/packages/helm/harbor/harbor)
</br> </br>
|![notification](https://raw.githubusercontent.com/goharbor/website/master/docs/img/readme/bell-outline-badged.svg)Community Meeting| |![notification](https://raw.githubusercontent.com/goharbor/website/master/docs/img/readme/bell-outline-badged.svg)Community Meeting|

View File

@ -1,27 +1,28 @@
# Versioning and Release # Versioning and Release
This document describes the versioning and release process of Harbor. This document is a living document, it's contents will be updated according to each release. This document describes the versioning and release process of Harbor. This document is a living document, contents will be updated according to each release.
## Releases ## Releases
Harbor releases will be versioned using dotted triples, similar to [Semantic Version](http://semver.org/). For this specific document, we will refer to the respective components of this triple as `<major>.<minor>.<patch>`. The version number may have additional information, such as "-rc1,-rc2,-rc3" to mark release candidate builds for earlier access. Such releases will be considered as "pre-releases". Harbor releases will be versioned using dotted triples, similar to [Semantic Version](http://semver.org/). For this specific document, we will refer to the respective components of this triple as `<major>.<minor>.<patch>`. The version number may have additional information, such as "-rc1,-rc2,-rc3" to mark release candidate builds for earlier access. Such releases will be considered as "pre-releases".
### Major and Minor Releases ### Major and Minor Releases
Major and minor releases of Harbor will be branched from `main` when the release reaches to `RC(release candidate)` state. The branch format should follow `release-<major>.<minor>.0`. For example, once the release `v1.0.0` reaches to RC, a branch will be created with the format `release-1.0.0`. When the release reaches to `GA(General Available)` state, the tag with format `v<major>.<minor>.<patch>` and should be made with the command `git tag -s v<major>.<minor>.<patch>`. The release cadence is around 3 months, might be adjusted based on open source events, but will communicate it clearly. Major and minor releases of Harbor will be branched from `main` when the release reaches to `RC(release candidate)` state. The branch format should follow `release-<major>.<minor>.0`. For example, once the release `v1.0.0` reaches to RC, a branch will be created with the format `release-1.0.0`. When the release reaches to `GA(General Available)` state, The tag with format `v<major>.<minor>.<patch>` and should be made with command `git tag -s v<major>.<minor>.<patch>`. The release cadence is around 3 months, might be adjusted based on open source event, but will communicate it clearly.
### Patch releases ### Patch releases
Patch releases are based on the major/minor release branch, the release cadence for patch release of recent minor release is one month to solve critical community and security issues. The cadence for patch release of recent minus two minor releases are on-demand driven based on the severity of the issue to be fixed. Patch releases are based on the major/minor release branch, the release cadence for patch release of recent minor release is one month to solve critical community and security issues. The cadence for patch release of recent minus two minor releases are on-demand driven based on the severity of the issue to be fixed.
### Pre-releases ### Pre-releases
`Pre-releases:mainly the different RC builds` will be compiled from their corresponding branches. Please note that they are done to assist in the stabilization process, no guarantees are provided. `Pre-releases:mainly the different RC builds` will be compiled from their corresponding branches. Please note they are done to assist in the stabilization process, no guarantees are provided.
### Minor Release Support Matrix ### Minor Release Support Matrix
| Version | Supported | | Version | Supported |
|----------------| ------------------ | |----------------| ------------------ |
| Harbor v2.13.x | :white_check_mark: |
| Harbor v2.12.x | :white_check_mark: | | Harbor v2.12.x | :white_check_mark: |
| Harbor v2.11.x | :white_check_mark: | | Harbor v2.11.x | :white_check_mark: |
| Harbor v2.10.x | :white_check_mark: |
### Upgrade path and support policy ### Upgrade path and support policy
The upgrade path for Harbor is (1) 2.2.x patch releases are always compatible with its major and minor versions. For example, previous released 2.2.x can be upgraded to most recent 2.2.3 release. (2) Harbor only supports two previous minor releases to upgrade to current minor release. For example, 2.3.0 will only support 2.1.0 and 2.2.0 to upgrade from, 2.0.0 to 2.3.0 is not supported. One should upgrade to 2.2.0 first, then to 2.3.0. The upgrade path for Harbor is (1) 2.2.x patch releases are always compatible with its major and minor version. For example, previous released 2.2.x can be upgraded to most recent 2.2.3 release. (2) Harbor only supports two previous minor releases to upgrade to current minor release. For example, 2.3.0 will only support 2.1.0 and 2.2.0 to upgrade from, 2.0.0 to 2.3.0 is not supported. One should upgrade to 2.2.0 first, then to 2.3.0.
The Harbor project maintains release branches for the three most recent minor releases, each minor release will be maintained for approximately 9 months. The Harbor project maintains release branches for the three most recent minor releases, each minor release will be maintained for approximately 9 months.
### Next Release ### Next Release
@ -32,12 +33,12 @@ The activity for next release will be tracked in the [up-to-date project board](
The following steps outline what to do when it's time to plan for and publish a release. Depending on the release (major/minor/patch), not all the following items are needed. The following steps outline what to do when it's time to plan for and publish a release. Depending on the release (major/minor/patch), not all the following items are needed.
1. Prepare information about what's new in the release. 1. Prepare information about what's new in the release.
* For every release, update the documentation for changes that have happened in the release. See the [goharbor/website](https://github.com/goharbor/website) repo for more details on how to create documentation for a release. All documentation for a release should be published by the time the release is out. * For every release, update documentation for changes that have happened in the release. See the [goharbor/website](https://github.com/goharbor/website) repo for more details on how to create documentation for a release. All documentation for a release should be published by the time the release is out.
* For every release, write release notes. See [previous releases](https://github.com/goharbor/harbor/releases) for examples of what to include in release notes. * For every release, write release notes. See [previous releases](https://github.com/goharbor/harbor/releases) for examples of what to include in release notes.
* For a major/minor release, write a blog post that highlights new features in the release. Plan to publish this on the same day as the release. Highlight the themes, or areas of focus, for the release. Some examples of themes are security, bug fixes, feature improvements. If there are any new features or workflows introduced in a release, consider writing additional blog posts to help users learn about the new features. Plan to publish these after the release date (all blogs dont have to be published all at once). * For a major/minor release, write a blog post that highlights new features in the release. Plan to publish this the same day as the release. Highlight the themes, or areas of focus, for the release. Some examples of themes are security, bug fixes, feature improvements. If there are any new features or workflows introduced in a release, consider writing additional blog posts to help users learn about the new features. Plan to publish these after the release date (all blogs dont have to be published all at once).
1. Release a new version. Make the new version, docs updates, and blog posts available. 1. Release a new version. Make the new version, docs updates, and blog posts available.
1. Announce the release and thank contributors. We should be doing the following for all releases. 1. Announce the release and thank contributors. We should be doing the following for all releases.
* In all messages to the community include a brief list of highlights and links to the new release blog, release notes, or download location. Also include shoutouts to community members contributions included in the release. * In all messages to the community include a brief list of highlights and links to the new release blog, release notes, or download location. Also include shoutouts to community member contribution included in the release.
* Send an email to the community via the [mailing list](https://lists.cncf.io/g/harbor-users) * Send an email to the community via the [mailing list](https://lists.cncf.io/g/harbor-users)
* Post a message in the Harbor [slack channel](https://cloud-native.slack.com/archives/CC1E09J6S) * Post a message in the Harbor [slack channel](https://cloud-native.slack.com/archives/CC1E09J6S)
* Post to social media. Maintainers are encouraged to also post or repost from the Harbor account to help spread the word. * Post to social media. Maintainers are encouraged to also post or repost from the Harbor account to help spread the word.

View File

@ -9,11 +9,11 @@ This document provides a link to the [Harbor Project board](https://github.com/o
Discussion on the roadmap can take place in threads under [Issues](https://github.com/goharbor/harbor/issues) or in [community meetings](https://goharbor.io/community/). Please open and comment on an issue if you want to provide suggestions and feedback to an item in the roadmap. Please review the roadmap to avoid potential duplicated effort. Discussion on the roadmap can take place in threads under [Issues](https://github.com/goharbor/harbor/issues) or in [community meetings](https://goharbor.io/community/). Please open and comment on an issue if you want to provide suggestions and feedback to an item in the roadmap. Please review the roadmap to avoid potential duplicated effort.
### How to add an item to the roadmap? ### How to add an item to the roadmap?
Please open an issue to track any initiative on the roadmap of Harbor (Usually driven by new feature requests). We will work with and rely on our community to focus our efforts on improving Harbor. Please open an issue to track any initiative on the roadmap of Harbor (Usually driven by new feature requests). We will work with and rely on our community to focus our efforts to improve Harbor.
### Current Roadmap ### Current Roadmap
The following table includes the current roadmap for Harbor. If you have any questions or would like to contribute to Harbor, please attend a [community meeting](https://goharbor.io/community/) to discuss with our team. If you don't know where to start, we are always looking for contributors who will help us reduce technical, automation, and documentation debt. Please take the timelines & dates as proposals and goals. Priorities and requirements change based on community feedback, roadblocks encountered, community contributions, etc. If you depend on a specific item, we encourage you to attend community meetings to get updated status information, or help us deliver that feature by contributing to Harbor. The following table includes the current roadmap for Harbor. If you have any questions or would like to contribute to Harbor, please attend a [community meeting](https://goharbor.io/community/) to discuss with our team. If you don't know where to start, we are always looking for contributors that will help us reduce technical, automation, and documentation debt. Please take the timelines & dates as proposals and goals. Priorities and requirements change based on community feedback, roadblocks encountered, community contributions, etc. If you depend on a specific item, we encourage you to attend community meetings to get updated status information, or help us deliver that feature by contributing to Harbor.
`Last Updated: June 2022` `Last Updated: June 2022`
@ -49,4 +49,4 @@ The following table includes the current roadmap for Harbor. If you have any que
|I&AM and RBAC|Improved Multi-tenancy through granular access and ability to manage teams of users and robot accounts through workspaces|Dec 2020| |I&AM and RBAC|Improved Multi-tenancy through granular access and ability to manage teams of users and robot accounts through workspaces|Dec 2020|
|Observability|Expose Harbor metrics through Prometheus Integration|Mar 2021| |Observability|Expose Harbor metrics through Prometheus Integration|Mar 2021|
|Tracing|Leverage OpenTelemetry for enhanced tracing capabilities and identify bottlenecks and improve performance |Mar 2021| |Tracing|Leverage OpenTelemetry for enhanced tracing capabilities and identify bottlenecks and improve performance |Mar 2021|
|Image Signing|Leverage Sigstore Cosign to deliver persistent image signatures across image replications|Apr 2021| |Image Signing|Leverage Sigstore Cosign to deliver persisting image signatures across image replications|Apr 2021|

View File

@ -1 +1 @@
v2.14.0 v2.13.1

View File

@ -336,8 +336,6 @@ paths:
responses: responses:
'200': '200':
$ref: '#/responses/200' $ref: '#/responses/200'
'400':
$ref: '#/responses/400'
'404': '404':
$ref: '#/responses/404' $ref: '#/responses/404'
'500': '500':
@ -3031,8 +3029,6 @@ paths:
type: string type: string
'401': '401':
$ref: '#/responses/401' $ref: '#/responses/401'
'409':
$ref: '#/responses/409'
'500': '500':
$ref: '#/responses/500' $ref: '#/responses/500'
'/usergroups/{group_id}': '/usergroups/{group_id}':
@ -3564,8 +3560,6 @@ paths:
responses: responses:
'200': '200':
$ref: '#/responses/200' $ref: '#/responses/200'
'400':
$ref: '#/responses/400'
'401': '401':
$ref: '#/responses/401' $ref: '#/responses/401'
'403': '403':
@ -4004,8 +3998,6 @@ paths:
responses: responses:
'200': '200':
$ref: '#/responses/200' $ref: '#/responses/200'
'400':
$ref: '#/responses/400'
'401': '401':
$ref: '#/responses/401' $ref: '#/responses/401'
'403': '403':
@ -6146,7 +6138,6 @@ paths:
cve_id(exact match) cve_id(exact match)
cvss_score_v3(range condition) cvss_score_v3(range condition)
severity(exact match) severity(exact match)
status(exact match)
repository_name(exact match) repository_name(exact match)
project_id(exact match) project_id(exact match)
package(exact match) package(exact match)
@ -10075,9 +10066,6 @@ definitions:
severity: severity:
type: string type: string
description: the severity of the vulnerability description: the severity of the vulnerability
status:
type: string
description: the status of the vulnerability, example "fixed", "won't fix"
cvss_v3_score: cvss_v3_score:
type: number type: number
format: float format: float

View File

@ -1,9 +0,0 @@
ALTER TABLE role_permission ALTER COLUMN id TYPE BIGINT;
ALTER SEQUENCE role_permission_id_seq AS BIGINT;
ALTER TABLE permission_policy ALTER COLUMN id TYPE BIGINT;
ALTER SEQUENCE permission_policy_id_seq AS BIGINT;
ALTER TABLE role_permission ALTER COLUMN permission_policy_id TYPE BIGINT;
ALTER TABLE vulnerability_record ADD COLUMN IF NOT EXISTS status text;

View File

@ -18,7 +18,7 @@ TIMESTAMP=$(shell date +"%Y%m%d")
# docker parameters # docker parameters
DOCKERCMD=$(shell which docker) DOCKERCMD=$(shell which docker)
DOCKERBUILD=$(DOCKERCMD) build --no-cache --network=$(DOCKERNETWORK) DOCKERBUILD=$(DOCKERCMD) build --no-cache
DOCKERBUILD_WITH_PULL_PARA=$(DOCKERBUILD) --pull=$(PULL_BASE_FROM_DOCKERHUB) DOCKERBUILD_WITH_PULL_PARA=$(DOCKERBUILD) --pull=$(PULL_BASE_FROM_DOCKERHUB)
DOCKERRMIMAGE=$(DOCKERCMD) rmi DOCKERRMIMAGE=$(DOCKERCMD) rmi
DOCKERIMAGES=$(DOCKERCMD) images DOCKERIMAGES=$(DOCKERCMD) images
@ -154,7 +154,7 @@ _build_trivy_adapter:
$(call _extract_archive, $(TRIVY_ADAPTER_DOWNLOAD_URL), $(DOCKERFILEPATH_TRIVY_ADAPTER)/binary/) ; \ $(call _extract_archive, $(TRIVY_ADAPTER_DOWNLOAD_URL), $(DOCKERFILEPATH_TRIVY_ADAPTER)/binary/) ; \
else \ else \
echo "Building Trivy adapter $(TRIVYADAPTERVERSION) from sources..." ; \ echo "Building Trivy adapter $(TRIVYADAPTERVERSION) from sources..." ; \
cd $(DOCKERFILEPATH_TRIVY_ADAPTER) && $(DOCKERFILEPATH_TRIVY_ADAPTER)/builder.sh $(TRIVYADAPTERVERSION) $(GOBUILDIMAGE) $(DOCKERNETWORK) && cd - ; \ cd $(DOCKERFILEPATH_TRIVY_ADAPTER) && $(DOCKERFILEPATH_TRIVY_ADAPTER)/builder.sh $(TRIVYADAPTERVERSION) $(GOBUILDIMAGE) && cd - ; \
fi ; \ fi ; \
echo "Building Trivy adapter container for photon..." ; \ echo "Building Trivy adapter container for photon..." ; \
$(DOCKERBUILD_WITH_PULL_PARA) --build-arg harbor_base_image_version=$(BASEIMAGETAG) \ $(DOCKERBUILD_WITH_PULL_PARA) --build-arg harbor_base_image_version=$(BASEIMAGETAG) \
@ -178,7 +178,7 @@ _build_registry:
rm -rf $(DOCKERFILEPATH_REG)/binary && mkdir -p $(DOCKERFILEPATH_REG)/binary && \ rm -rf $(DOCKERFILEPATH_REG)/binary && mkdir -p $(DOCKERFILEPATH_REG)/binary && \
$(call _get_binary, $(REGISTRYURL), $(DOCKERFILEPATH_REG)/binary/registry); \ $(call _get_binary, $(REGISTRYURL), $(DOCKERFILEPATH_REG)/binary/registry); \
else \ else \
cd $(DOCKERFILEPATH_REG) && $(DOCKERFILEPATH_REG)/builder $(REGISTRY_SRC_TAG) $(DISTRIBUTION_SRC) $(GOBUILDIMAGE) $(DOCKERNETWORK) && cd - ; \ cd $(DOCKERFILEPATH_REG) && $(DOCKERFILEPATH_REG)/builder $(REGISTRY_SRC_TAG) $(DISTRIBUTION_SRC) $(GOBUILDIMAGE) && cd - ; \
fi fi
@echo "building registry container for photon..." @echo "building registry container for photon..."
@chmod 655 $(DOCKERFILEPATH_REG)/binary/registry && $(DOCKERBUILD_WITH_PULL_PARA) --build-arg harbor_base_image_version=$(BASEIMAGETAG) --build-arg harbor_base_namespace=$(BASEIMAGENAMESPACE) -f $(DOCKERFILEPATH_REG)/$(DOCKERFILENAME_REG) -t $(DOCKERIMAGENAME_REG):$(VERSIONTAG) . @chmod 655 $(DOCKERFILEPATH_REG)/binary/registry && $(DOCKERBUILD_WITH_PULL_PARA) --build-arg harbor_base_image_version=$(BASEIMAGETAG) --build-arg harbor_base_namespace=$(BASEIMAGENAMESPACE) -f $(DOCKERFILEPATH_REG)/$(DOCKERFILENAME_REG) -t $(DOCKERIMAGENAME_REG):$(VERSIONTAG) .
@ -233,17 +233,10 @@ define _build_base
fi fi
endef endef
ifeq ($(BUILD_INSTALLER), true) build: _build_prepare _build_db _build_portal _build_core _build_jobservice _build_log _build_nginx _build_registry _build_registryctl _build_trivy_adapter _build_redis _compile_and_build_exporter
buildcompt: _build_prepare _build_db _build_portal _build_core _build_jobservice _build_log _build_nginx _build_registry _build_registryctl _build_trivy_adapter _build_redis _compile_and_build_exporter
else
buildcompt: _build_db _build_portal _build_core _build_jobservice _build_nginx _build_registry _build_registryctl _build_trivy_adapter _build_redis _compile_and_build_exporter
endif
build: buildcompt
@if [ -n "$(REGISTRYUSER)" ] && [ -n "$(REGISTRYPASSWORD)" ] ; then \ @if [ -n "$(REGISTRYUSER)" ] && [ -n "$(REGISTRYPASSWORD)" ] ; then \
docker logout ; \ docker logout ; \
fi fi
cleanimage: cleanimage:
@echo "cleaning image for photon..." @echo "cleaning image for photon..."
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) - $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG)

View File

@ -1,7 +1,7 @@
FROM photon:5.0 FROM photon:5.0
RUN tdnf install -y python3 python3-pip python3-PyYAML python3-jinja2 && tdnf clean all RUN tdnf install -y python3 python3-pip python3-PyYAML python3-jinja2 && tdnf clean all
RUN pip3 install pipenv==2025.0.3 RUN pip3 install pipenv==2022.1.8
#To install only htpasswd binary from photon package httpd #To install only htpasswd binary from photon package httpd
RUN tdnf install -y rpm cpio apr-util RUN tdnf install -y rpm cpio apr-util

View File

@ -12,4 +12,4 @@ pylint = "*"
pytest = "*" pytest = "*"
[requires] [requires]
python_version = "3.13" python_version = "3.9.1"

View File

@ -1,11 +1,11 @@
{ {
"_meta": { "_meta": {
"hash": { "hash": {
"sha256": "d3a89b8575c29b9f822b892ffd31fd4a997effb1ebf3e3ed061a41e2d04b4490" "sha256": "0c84f574a48755d88f78a64d754b3f834a72f2a86808370dd5f3bf3e650bfa13"
}, },
"pipfile-spec": 6, "pipfile-spec": 6,
"requires": { "requires": {
"python_version": "3.13" "python_version": "3.9.1"
}, },
"sources": [ "sources": [
{ {
@ -18,122 +18,157 @@
"default": { "default": {
"click": { "click": {
"hashes": [ "hashes": [
"sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", "sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a",
"sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b" "sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"
], ],
"index": "pypi", "index": "pypi",
"markers": "python_version >= '3.10'", "version": "==8.0.1"
"version": "==8.2.1"
}, },
"packaging": { "packaging": {
"hashes": [ "hashes": [
"sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
"sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f" "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
], ],
"index": "pypi", "index": "pypi",
"markers": "python_version >= '3.8'", "version": "==20.9"
"version": "==25.0" },
"pyparsing": {
"hashes": [
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
],
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.4.7"
} }
}, },
"develop": { "develop": {
"astroid": { "astroid": {
"hashes": [ "hashes": [
"sha256:104fb9cb9b27ea95e847a94c003be03a9e039334a8ebca5ee27dafaf5c5711eb", "sha256:4db03ab5fc3340cf619dbc25e42c2cc3755154ce6009469766d7143d1fc2ee4e",
"sha256:c332157953060c6deb9caa57303ae0d20b0fbdb2e59b4a4f2a6ba49d0a7961ce" "sha256:8a398dfce302c13f14bab13e2b14fe385d32b73f4e4853b9bdfb64598baa1975"
], ],
"markers": "python_full_version >= '3.9.0'", "markers": "python_version ~= '3.6'",
"version": "==3.3.10" "version": "==2.5.6"
}, },
"dill": { "attrs": {
"hashes": [ "hashes": [
"sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1",
"sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049" "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"
], ],
"markers": "python_version >= '3.8'", "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==0.4.0" "version": "==21.2.0"
}, },
"iniconfig": { "iniconfig": {
"hashes": [ "hashes": [
"sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3",
"sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760" "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"
], ],
"markers": "python_version >= '3.8'", "version": "==1.1.1"
"version": "==2.1.0"
}, },
"isort": { "isort": {
"hashes": [ "hashes": [
"sha256:1cb5df28dfbc742e490c5e41bad6da41b805b0a8be7bc93cd0fb2a8a890ac450", "sha256:0a943902919f65c5684ac4e0154b1ad4fac6dcaa5d9f3426b732f1c8b5419be6",
"sha256:2dc5d7f65c9678d94c88dfc29161a320eec67328bc97aad576874cb4be1e9615" "sha256:2bb1680aad211e3c9944dbce1d4ba09a989f04e238296c87fe2139faa26d655d"
], ],
"markers": "python_full_version >= '3.9.0'", "markers": "python_version >= '3.6' and python_version < '4.0'",
"version": "==6.0.1" "version": "==5.8.0"
},
"lazy-object-proxy": {
"hashes": [
"sha256:17e0967ba374fc24141738c69736da90e94419338fd4c7c7bef01ee26b339653",
"sha256:1fee665d2638491f4d6e55bd483e15ef21f6c8c2095f235fef72601021e64f61",
"sha256:22ddd618cefe54305df49e4c069fa65715be4ad0e78e8d252a33debf00f6ede2",
"sha256:24a5045889cc2729033b3e604d496c2b6f588c754f7a62027ad4437a7ecc4837",
"sha256:410283732af311b51b837894fa2f24f2c0039aa7f220135192b38fcc42bd43d3",
"sha256:4732c765372bd78a2d6b2150a6e99d00a78ec963375f236979c0626b97ed8e43",
"sha256:489000d368377571c6f982fba6497f2aa13c6d1facc40660963da62f5c379726",
"sha256:4f60460e9f1eb632584c9685bccea152f4ac2130e299784dbaf9fae9f49891b3",
"sha256:5743a5ab42ae40caa8421b320ebf3a998f89c85cdc8376d6b2e00bd12bd1b587",
"sha256:85fb7608121fd5621cc4377a8961d0b32ccf84a7285b4f1d21988b2eae2868e8",
"sha256:9698110e36e2df951c7c36b6729e96429c9c32b3331989ef19976592c5f3c77a",
"sha256:9d397bf41caad3f489e10774667310d73cb9c4258e9aed94b9ec734b34b495fd",
"sha256:b579f8acbf2bdd9ea200b1d5dea36abd93cabf56cf626ab9c744a432e15c815f",
"sha256:b865b01a2e7f96db0c5d12cfea590f98d8c5ba64ad222300d93ce6ff9138bcad",
"sha256:bf34e368e8dd976423396555078def5cfc3039ebc6fc06d1ae2c5a65eebbcde4",
"sha256:c6938967f8528b3668622a9ed3b31d145fab161a32f5891ea7b84f6b790be05b",
"sha256:d1c2676e3d840852a2de7c7d5d76407c772927addff8d742b9808fe0afccebdf",
"sha256:d7124f52f3bd259f510651450e18e0fd081ed82f3c08541dffc7b94b883aa981",
"sha256:d900d949b707778696fdf01036f58c9876a0d8bfe116e8d220cfd4b15f14e741",
"sha256:ebfd274dcd5133e0afae738e6d9da4323c3eb021b3e13052d8cbd0e457b1256e",
"sha256:ed361bb83436f117f9917d282a456f9e5009ea12fd6de8742d1a4752c3017e93",
"sha256:f5144c75445ae3ca2057faac03fda5a902eff196702b0a24daf1d6ce0650514b"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
"version": "==1.6.0"
}, },
"mccabe": { "mccabe": {
"hashes": [ "hashes": [
"sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
"sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e" "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
], ],
"markers": "python_version >= '3.6'", "version": "==0.6.1"
"version": "==0.7.0"
}, },
"packaging": { "packaging": {
"hashes": [ "hashes": [
"sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
"sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f" "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
], ],
"index": "pypi", "index": "pypi",
"markers": "python_version >= '3.8'", "version": "==20.9"
"version": "==25.0"
},
"platformdirs": {
"hashes": [
"sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc",
"sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"
],
"markers": "python_version >= '3.9'",
"version": "==4.3.8"
}, },
"pluggy": { "pluggy": {
"hashes": [ "hashes": [
"sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
"sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746" "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
], ],
"markers": "python_version >= '3.9'", "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.6.0" "version": "==0.13.1"
}, },
"pygments": { "py": {
"hashes": [ "hashes": [
"sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3",
"sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b" "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"
], ],
"markers": "python_version >= '3.8'", "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.19.2" "version": "==1.10.0"
}, },
"pylint": { "pylint": {
"hashes": [ "hashes": [
"sha256:2b11de8bde49f9c5059452e0c310c079c746a0a8eeaa789e5aa966ecc23e4559", "sha256:586d8fa9b1891f4b725f587ef267abe2a1bad89d6b184520c7f07a253dd6e217",
"sha256:43860aafefce92fca4cf6b61fe199cdc5ae54ea28f9bf4cd49de267b5195803d" "sha256:f7e2072654a6b6afdf5e2fb38147d3e2d2d43c89f648637baab63e026481279b"
], ],
"index": "pypi", "index": "pypi",
"markers": "python_full_version >= '3.9.0'", "version": "==2.8.2"
"version": "==3.3.7" },
"pyparsing": {
"hashes": [
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
],
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.4.7"
}, },
"pytest": { "pytest": {
"hashes": [ "hashes": [
"sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", "sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b",
"sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c" "sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890"
], ],
"index": "pypi", "index": "pypi",
"markers": "python_version >= '3.9'", "version": "==6.2.4"
"version": "==8.4.1"
}, },
"tomlkit": { "toml": {
"hashes": [ "hashes": [
"sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
"sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0" "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
], ],
"markers": "python_version >= '3.8'", "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==0.13.3" "version": "==0.10.2"
},
"wrapt": {
"hashes": [
"sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"
],
"version": "==1.12.1"
} }
} }
} }

View File

@ -41,7 +41,6 @@ REGISTRY_CREDENTIAL_PASSWORD={{registry_password}}
CSRF_KEY={{csrf_key}} CSRF_KEY={{csrf_key}}
ROBOT_SCANNER_NAME_PREFIX={{scan_robot_prefix}} ROBOT_SCANNER_NAME_PREFIX={{scan_robot_prefix}}
PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory
REPLICATION_ADAPTER_WHITELIST=ali-acr,aws-ecr,azure-acr,docker-hub,docker-registry,github-ghcr,google-gcr,harbor,huawei-SWR,jfrog-artifactory,tencent-tcr,volcengine-cr
HTTP_PROXY={{core_http_proxy}} HTTP_PROXY={{core_http_proxy}}
HTTPS_PROXY={{core_https_proxy}} HTTPS_PROXY={{core_https_proxy}}

View File

@ -23,6 +23,7 @@ HTTPS_PROXY={{jobservice_https_proxy}}
NO_PROXY={{jobservice_no_proxy}} NO_PROXY={{jobservice_no_proxy}}
REGISTRY_CREDENTIAL_USERNAME={{registry_username}} REGISTRY_CREDENTIAL_USERNAME={{registry_username}}
REGISTRY_CREDENTIAL_PASSWORD={{registry_password}} REGISTRY_CREDENTIAL_PASSWORD={{registry_password}}
MAX_JOB_DURATION_SECONDS={{max_job_duration_seconds}}
{% if metric.enabled %} {% if metric.enabled %}
METRIC_NAMESPACE=harbor METRIC_NAMESPACE=harbor

View File

@ -227,6 +227,7 @@ def parse_yaml_config(config_file_path, with_trivy):
value = config_dict["max_job_duration_hours"] value = config_dict["max_job_duration_hours"]
if not isinstance(value, int) or value < 24: if not isinstance(value, int) or value < 24:
config_dict["max_job_duration_hours"] = 24 config_dict["max_job_duration_hours"] = 24
config_dict['max_job_duration_seconds'] = config_dict['max_job_duration_hours'] * 3600
config_dict['job_loggers'] = js_config["job_loggers"] config_dict['job_loggers'] = js_config["job_loggers"]
config_dict['logger_sweeper_duration'] = js_config["logger_sweeper_duration"] config_dict['logger_sweeper_duration'] = js_config["logger_sweeper_duration"]
config_dict['jobservice_secret'] = generate_random_string(16) config_dict['jobservice_secret'] = generate_random_string(16)

View File

@ -34,6 +34,7 @@ def prepare_job_service(config_dict):
internal_tls=config_dict['internal_tls'], internal_tls=config_dict['internal_tls'],
max_job_workers=config_dict['max_job_workers'], max_job_workers=config_dict['max_job_workers'],
max_job_duration_hours=config_dict['max_job_duration_hours'], max_job_duration_hours=config_dict['max_job_duration_hours'],
max_job_duration_seconds=config_dict['max_job_duration_seconds'],
job_loggers=config_dict['job_loggers'], job_loggers=config_dict['job_loggers'],
logger_sweeper_duration=config_dict['logger_sweeper_duration'], logger_sweeper_duration=config_dict['logger_sweeper_duration'],
redis_url=config_dict['redis_url_js'], redis_url=config_dict['redis_url_js'],

View File

@ -15,7 +15,6 @@ fi
VERSION="$1" VERSION="$1"
DISTRIBUTION_SRC="$2" DISTRIBUTION_SRC="$2"
GOBUILDIMAGE="$3" GOBUILDIMAGE="$3"
DOCKERNETWORK="$4"
set -e set -e
@ -34,7 +33,7 @@ cd $cur
echo 'build the registry binary ...' echo 'build the registry binary ...'
cp Dockerfile.binary $TEMP cp Dockerfile.binary $TEMP
docker build --network=$DOCKERNETWORK --build-arg golang_image=$GOBUILDIMAGE -f $TEMP/Dockerfile.binary -t registry-golang $TEMP docker build --build-arg golang_image=$GOBUILDIMAGE -f $TEMP/Dockerfile.binary -t registry-golang $TEMP
echo 'copy the registry binary to local...' echo 'copy the registry binary to local...'
ID=$(docker create registry-golang) ID=$(docker create registry-golang)

View File

@ -9,7 +9,6 @@ fi
VERSION="$1" VERSION="$1"
GOBUILDIMAGE="$2" GOBUILDIMAGE="$2"
DOCKERNETWORK="$3"
set -e set -e
@ -23,7 +22,7 @@ cd $TEMP; git checkout $VERSION; cd -
echo "Building Trivy adapter binary ..." echo "Building Trivy adapter binary ..."
cp Dockerfile.binary $TEMP cp Dockerfile.binary $TEMP
docker build --network=$DOCKERNETWORK --build-arg golang_image=$GOBUILDIMAGE -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP docker build --build-arg golang_image=$GOBUILDIMAGE -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP
echo "Copying Trivy adapter binary from the container to the local directory..." echo "Copying Trivy adapter binary from the container to the local directory..."
ID=$(docker create trivy-adapter-golang) ID=$(docker create trivy-adapter-golang)

View File

@ -1,56 +1,76 @@
version: "2" linters-settings:
linters: gofmt:
default: none # Simplify code: gofmt with `-s` option.
enable: # Default: true
- bodyclose simplify: false
- errcheck
- goheader
- govet
- ineffassign
- misspell
- revive
- staticcheck
- whitespace
settings:
goheader:
template-path: copyright.tmpl
misspell: misspell:
locale: US,UK locale: US,UK
staticcheck: goimports:
checks: local-prefixes: github.com/goharbor/harbor
- ST1019 stylecheck:
exclusions: checks: [
generated: lax "ST1019", # Importing the same package multiple times.
presets: ]
- comments goheader:
- common-false-positives template-path: copyright.tmpl
- legacy
- std-error-handling linters:
paths: disable-all: true
- third_party$
- builtin$
- examples$
- .*_test\.go
- .*test\.go
- testing
- src/jobservice/mgt/mock_manager.go
formatters:
enable: enable:
- gofmt - gofmt
- goheader
- misspell
- typecheck
# - dogsled
# - dupl
# - depguard
# - funlen
# - goconst
# - gocritic
# - gocyclo
# - goimports
# - goprintffuncname
- ineffassign
# - nakedret
# - nolintlint
- revive
- whitespace
- bodyclose
- errcheck
# - gosec
- gosimple
- goimports
- govet
# - noctx
# - rowserrcheck
- staticcheck
- stylecheck
# - unconvert
# - unparam
# - unused // disabled due to too many false positive check and limited support golang 1.19 https://github.com/dominikh/go-tools/issues/1282
run:
skip-files:
- ".*_test.go"
- ".*test.go"
skip-dirs:
- "testing"
timeout: 20m
issue:
max-same-issues: 0
max-per-linter: 0
issues:
# Excluding configuration per-path, per-linter, per-text and per-source
exclude-rules:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- goimports
- path: src/testing/*.go
linters:
- goimports
- path: src/jobservice/mgt/mock_manager.go
linters:
- goimports - goimports
settings:
gofmt:
simplify: false
goimports:
local-prefixes:
- github.com/goharbor/harbor
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
- .*_test\.go
- .*test\.go
- testing
- src/jobservice/mgt/mock_manager.go

View File

@ -78,7 +78,7 @@ func (b *BaseAPI) RenderError(code int, text string) {
} }
// DecodeJSONReq decodes a json request // DecodeJSONReq decodes a json request
func (b *BaseAPI) DecodeJSONReq(v any) error { func (b *BaseAPI) DecodeJSONReq(v interface{}) error {
err := json.Unmarshal(b.Ctx.Input.CopyBody(1<<35), v) err := json.Unmarshal(b.Ctx.Input.CopyBody(1<<35), v)
if err != nil { if err != nil {
log.Errorf("Error while decoding the json request, error: %v, %v", log.Errorf("Error while decoding the json request, error: %v, %v",
@ -89,7 +89,7 @@ func (b *BaseAPI) DecodeJSONReq(v any) error {
} }
// Validate validates v if it implements interface validation.ValidFormer // Validate validates v if it implements interface validation.ValidFormer
func (b *BaseAPI) Validate(v any) (bool, error) { func (b *BaseAPI) Validate(v interface{}) (bool, error) {
validator := validation.Validation{} validator := validation.Validation{}
isValid, err := validator.Valid(v) isValid, err := validator.Valid(v)
if err != nil { if err != nil {
@ -108,7 +108,7 @@ func (b *BaseAPI) Validate(v any) (bool, error) {
} }
// DecodeJSONReqAndValidate does both decoding and validation // DecodeJSONReqAndValidate does both decoding and validation
func (b *BaseAPI) DecodeJSONReqAndValidate(v any) (bool, error) { func (b *BaseAPI) DecodeJSONReqAndValidate(v interface{}) (bool, error) {
if err := b.DecodeJSONReq(v); err != nil { if err := b.DecodeJSONReq(v); err != nil {
return false, err return false, err
} }

View File

@ -252,7 +252,4 @@ const (
// Global Leeway used for token validation // Global Leeway used for token validation
JwtLeeway = 60 * time.Second JwtLeeway = 60 * time.Second
// The replication adapter whitelist
ReplicationAdapterWhiteList = "REPLICATION_ADAPTER_WHITELIST"
) )

View File

@ -144,6 +144,6 @@ func (l *mLogger) Verbose() bool {
} }
// Printf ... // Printf ...
func (l *mLogger) Printf(format string, v ...any) { func (l *mLogger) Printf(format string, v ...interface{}) {
l.logger.Infof(format, v...) l.logger.Infof(format, v...)
} }

View File

@ -29,7 +29,7 @@ import (
var testCtx context.Context var testCtx context.Context
func execUpdate(o orm.TxOrmer, sql string, params ...any) error { func execUpdate(o orm.TxOrmer, sql string, params ...interface{}) error {
p, err := o.Raw(sql).Prepare() p, err := o.Raw(sql).Prepare()
if err != nil { if err != nil {
return err return err

View File

@ -27,7 +27,7 @@ func TestMaxOpenConns(t *testing.T) {
queryNum := 200 queryNum := 200
results := make([]bool, queryNum) results := make([]bool, queryNum)
for i := range queryNum { for i := 0; i < queryNum; i++ {
wg.Add(1) wg.Add(1)
go func(i int) { go func(i int) {
defer wg.Done() defer wg.Done()

View File

@ -142,7 +142,7 @@ func ArrayEqual(arrayA, arrayB []int) bool {
return false return false
} }
size := len(arrayA) size := len(arrayA)
for i := range size { for i := 0; i < size; i++ {
if arrayA[i] != arrayB[i] { if arrayA[i] != arrayB[i] {
return false return false
} }

View File

@ -69,7 +69,7 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
} }
// Get ... // Get ...
func (c *Client) Get(url string, v ...any) error { func (c *Client) Get(url string, v ...interface{}) error {
req, err := http.NewRequest(http.MethodGet, url, nil) req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil { if err != nil {
return err return err
@ -98,7 +98,7 @@ func (c *Client) Head(url string) error {
} }
// Post ... // Post ...
func (c *Client) Post(url string, v ...any) error { func (c *Client) Post(url string, v ...interface{}) error {
var reader io.Reader var reader io.Reader
if len(v) > 0 { if len(v) > 0 {
if r, ok := v[0].(io.Reader); ok { if r, ok := v[0].(io.Reader); ok {
@ -123,7 +123,7 @@ func (c *Client) Post(url string, v ...any) error {
} }
// Put ... // Put ...
func (c *Client) Put(url string, v ...any) error { func (c *Client) Put(url string, v ...interface{}) error {
var reader io.Reader var reader io.Reader
if len(v) > 0 { if len(v) > 0 {
data, err := json.Marshal(v[0]) data, err := json.Marshal(v[0])
@ -176,7 +176,7 @@ func (c *Client) do(req *http.Request) ([]byte, error) {
// GetAndIteratePagination iterates the pagination header and returns all resources // GetAndIteratePagination iterates the pagination header and returns all resources
// The parameter "v" must be a pointer to a slice // The parameter "v" must be a pointer to a slice
func (c *Client) GetAndIteratePagination(endpoint string, v any) error { func (c *Client) GetAndIteratePagination(endpoint string, v interface{}) error {
url, err := url.Parse(endpoint) url, err := url.Parse(endpoint)
if err != nil { if err != nil {
return err return err

View File

@ -15,7 +15,7 @@
package models package models
// Parameters for job execution. // Parameters for job execution.
type Parameters map[string]any type Parameters map[string]interface{}
// JobRequest is the request of launching a job. // JobRequest is the request of launching a job.
type JobRequest struct { type JobRequest struct {
@ -96,5 +96,5 @@ type JobStatusChange struct {
// Message is designed for sub/pub messages // Message is designed for sub/pub messages
type Message struct { type Message struct {
Event string Event string
Data any // generic format Data interface{} // generic format
} }

View File

@ -119,7 +119,7 @@ func BenchmarkProjectEvaluator(b *testing.B) {
resource := NewNamespace(public.ProjectID).Resource(rbac.ResourceRepository) resource := NewNamespace(public.ProjectID).Resource(rbac.ResourceRepository)
b.ResetTimer() b.ResetTimer()
for b.Loop() { for i := 0; i < b.N; i++ {
evaluator.HasPermission(context.TODO(), resource, rbac.ActionPull) evaluator.HasPermission(context.TODO(), resource, rbac.ActionPull)
} }
} }

View File

@ -43,7 +43,7 @@ func (ns *projectNamespace) Resource(subresources ...types.Resource) types.Resou
return types.Resource(fmt.Sprintf("/project/%d", ns.projectID)).Subresource(subresources...) return types.Resource(fmt.Sprintf("/project/%d", ns.projectID)).Subresource(subresources...)
} }
func (ns *projectNamespace) Identity() any { func (ns *projectNamespace) Identity() interface{} {
return ns.projectID return ns.projectID
} }

View File

@ -162,7 +162,6 @@ var (
{Resource: rbac.ResourceRobot, Action: rbac.ActionRead}, {Resource: rbac.ResourceRobot, Action: rbac.ActionRead},
{Resource: rbac.ResourceRobot, Action: rbac.ActionList}, {Resource: rbac.ResourceRobot, Action: rbac.ActionList},
{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead},
{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList}, {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList},
{Resource: rbac.ResourceScan, Action: rbac.ActionCreate}, {Resource: rbac.ResourceScan, Action: rbac.ActionCreate},

View File

@ -38,7 +38,7 @@ func (ns *systemNamespace) Resource(subresources ...types.Resource) types.Resour
return types.Resource("/system/").Subresource(subresources...) return types.Resource("/system/").Subresource(subresources...)
} }
func (ns *systemNamespace) Identity() any { func (ns *systemNamespace) Identity() interface{} {
return nil return nil
} }

View File

@ -63,7 +63,7 @@ func (t *tokenSecurityCtx) GetMyProjects() ([]*models.Project, error) {
return []*models.Project{}, nil return []*models.Project{}, nil
} }
func (t *tokenSecurityCtx) GetProjectRoles(_ any) []int { func (t *tokenSecurityCtx) GetProjectRoles(_ interface{}) []int {
return []int{} return []int{}
} }

View File

@ -18,7 +18,7 @@ import (
"github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common"
) )
var defaultConfig = map[string]any{ var defaultConfig = map[string]interface{}{
common.ExtEndpoint: "https://host01.com", common.ExtEndpoint: "https://host01.com",
common.AUTHMode: common.DBAuth, common.AUTHMode: common.DBAuth,
common.DatabaseType: "postgresql", common.DatabaseType: "postgresql",
@ -66,6 +66,6 @@ var defaultConfig = map[string]any{
} }
// GetDefaultConfigMap returns the default config map for easier modification. // GetDefaultConfigMap returns the default config map for easier modification.
func GetDefaultConfigMap() map[string]any { func GetDefaultConfigMap() map[string]interface{} {
return defaultConfig return defaultConfig
} }

View File

@ -30,7 +30,7 @@ type GCResult struct {
} }
// NewRegistryCtl returns a mock registry server // NewRegistryCtl returns a mock registry server
func NewRegistryCtl(_ map[string]any) (*httptest.Server, error) { func NewRegistryCtl(_ map[string]interface{}) (*httptest.Server, error) {
m := []*RequestHandlerMapping{} m := []*RequestHandlerMapping{}
gcr := GCResult{true, "hello-world", time.Now(), time.Now()} gcr := GCResult{true, "hello-world", time.Now(), time.Now()}

View File

@ -94,9 +94,9 @@ func NewServer(mappings ...*RequestHandlerMapping) *httptest.Server {
} }
// GetUnitTestConfig ... // GetUnitTestConfig ...
func GetUnitTestConfig() map[string]any { func GetUnitTestConfig() map[string]interface{} {
ipAddress := os.Getenv("IP") ipAddress := os.Getenv("IP")
return map[string]any{ return map[string]interface{}{
common.ExtEndpoint: fmt.Sprintf("https://%s", ipAddress), common.ExtEndpoint: fmt.Sprintf("https://%s", ipAddress),
common.AUTHMode: "db_auth", common.AUTHMode: "db_auth",
common.DatabaseType: "postgresql", common.DatabaseType: "postgresql",
@ -130,7 +130,7 @@ func GetUnitTestConfig() map[string]any {
} }
// TraceCfgMap ... // TraceCfgMap ...
func TraceCfgMap(cfgs map[string]any) { func TraceCfgMap(cfgs map[string]interface{}) {
var keys []string var keys []string
for k := range cfgs { for k := range cfgs {
keys = append(keys, k) keys = append(keys, k)

View File

@ -89,7 +89,7 @@ type SearchUserEntry struct {
ExtID string `json:"externalId"` ExtID string `json:"externalId"`
UserName string `json:"userName"` UserName string `json:"userName"`
Emails []SearchUserEmailEntry `json:"emails"` Emails []SearchUserEmailEntry `json:"emails"`
Groups []any Groups []interface{}
} }
// SearchUserRes is the struct to parse the result of search user API of UAA // SearchUserRes is the struct to parse the result of search user API of UAA

View File

@ -75,7 +75,7 @@ func GenerateRandomStringWithLen(length int) string {
if err != nil { if err != nil {
log.Warningf("Error reading random bytes: %v", err) log.Warningf("Error reading random bytes: %v", err)
} }
for i := range length { for i := 0; i < length; i++ {
result[i] = chars[int(result[i])%l] result[i] = chars[int(result[i])%l]
} }
return string(result) return string(result)
@ -140,7 +140,7 @@ func ParseTimeStamp(timestamp string) (*time.Time, error) {
} }
// ConvertMapToStruct is used to fill the specified struct with map. // ConvertMapToStruct is used to fill the specified struct with map.
func ConvertMapToStruct(object any, values any) error { func ConvertMapToStruct(object interface{}, values interface{}) error {
if object == nil { if object == nil {
return errors.New("nil struct is not supported") return errors.New("nil struct is not supported")
} }
@ -158,7 +158,7 @@ func ConvertMapToStruct(object any, values any) error {
} }
// ParseProjectIDOrName parses value to ID(int64) or name(string) // ParseProjectIDOrName parses value to ID(int64) or name(string)
func ParseProjectIDOrName(value any) (int64, string, error) { func ParseProjectIDOrName(value interface{}) (int64, string, error) {
if value == nil { if value == nil {
return 0, "", errors.New("harborIDOrName is nil") return 0, "", errors.New("harborIDOrName is nil")
} }
@ -177,7 +177,7 @@ func ParseProjectIDOrName(value any) (int64, string, error) {
} }
// SafeCastString -- cast an object to string safely // SafeCastString -- cast an object to string safely
func SafeCastString(value any) string { func SafeCastString(value interface{}) string {
if result, ok := value.(string); ok { if result, ok := value.(string); ok {
return result return result
} }
@ -185,7 +185,7 @@ func SafeCastString(value any) string {
} }
// SafeCastInt -- // SafeCastInt --
func SafeCastInt(value any) int { func SafeCastInt(value interface{}) int {
if result, ok := value.(int); ok { if result, ok := value.(int); ok {
return result return result
} }
@ -193,7 +193,7 @@ func SafeCastInt(value any) int {
} }
// SafeCastBool -- // SafeCastBool --
func SafeCastBool(value any) bool { func SafeCastBool(value interface{}) bool {
if result, ok := value.(bool); ok { if result, ok := value.(bool); ok {
return result return result
} }
@ -201,7 +201,7 @@ func SafeCastBool(value any) bool {
} }
// SafeCastFloat64 -- // SafeCastFloat64 --
func SafeCastFloat64(value any) float64 { func SafeCastFloat64(value interface{}) float64 {
if result, ok := value.(float64); ok { if result, ok := value.(float64); ok {
return result return result
} }
@ -214,9 +214,9 @@ func TrimLower(str string) string {
} }
// GetStrValueOfAnyType return string format of any value, for map, need to convert to json // GetStrValueOfAnyType return string format of any value, for map, need to convert to json
func GetStrValueOfAnyType(value any) string { func GetStrValueOfAnyType(value interface{}) string {
var strVal string var strVal string
if _, ok := value.(map[string]any); ok { if _, ok := value.(map[string]interface{}); ok {
b, err := json.Marshal(value) b, err := json.Marshal(value)
if err != nil { if err != nil {
log.Errorf("can not marshal json object, error %v", err) log.Errorf("can not marshal json object, error %v", err)
@ -237,18 +237,18 @@ func GetStrValueOfAnyType(value any) string {
} }
// IsIllegalLength ... // IsIllegalLength ...
func IsIllegalLength(s string, minVal int, maxVal int) bool { func IsIllegalLength(s string, min int, max int) bool {
if minVal == -1 { if min == -1 {
return (len(s) > maxVal) return (len(s) > max)
} }
if maxVal == -1 { if max == -1 {
return (len(s) <= minVal) return (len(s) <= min)
} }
return (len(s) < minVal || len(s) > maxVal) return (len(s) < min || len(s) > max)
} }
// ParseJSONInt ... // ParseJSONInt ...
func ParseJSONInt(value any) (int, bool) { func ParseJSONInt(value interface{}) (int, bool) {
switch v := value.(type) { switch v := value.(type) {
case float64: case float64:
return int(v), true return int(v), true
@ -337,3 +337,13 @@ func MostMatchSorter(a, b string, matchWord string) bool {
func IsLocalPath(path string) bool { func IsLocalPath(path string) bool {
return len(path) == 0 || (strings.HasPrefix(path, "/") && !strings.HasPrefix(path, "//")) return len(path) == 0 || (strings.HasPrefix(path, "/") && !strings.HasPrefix(path, "//"))
} }
// StringInSlice check if the string is in the slice
func StringInSlice(str string, slice []string) bool {
for _, s := range slice {
if s == str {
return true
}
}
return false
}

View File

@ -216,7 +216,7 @@ type testingStruct struct {
} }
func TestConvertMapToStruct(t *testing.T) { func TestConvertMapToStruct(t *testing.T) {
dataMap := make(map[string]any) dataMap := make(map[string]interface{})
dataMap["Name"] = "testing" dataMap["Name"] = "testing"
dataMap["Count"] = 100 dataMap["Count"] = 100
@ -232,7 +232,7 @@ func TestConvertMapToStruct(t *testing.T) {
func TestSafeCastString(t *testing.T) { func TestSafeCastString(t *testing.T) {
type args struct { type args struct {
value any value interface{}
} }
tests := []struct { tests := []struct {
name string name string
@ -254,7 +254,7 @@ func TestSafeCastString(t *testing.T) {
func TestSafeCastBool(t *testing.T) { func TestSafeCastBool(t *testing.T) {
type args struct { type args struct {
value any value interface{}
} }
tests := []struct { tests := []struct {
name string name string
@ -276,7 +276,7 @@ func TestSafeCastBool(t *testing.T) {
func TestSafeCastInt(t *testing.T) { func TestSafeCastInt(t *testing.T) {
type args struct { type args struct {
value any value interface{}
} }
tests := []struct { tests := []struct {
name string name string
@ -298,7 +298,7 @@ func TestSafeCastInt(t *testing.T) {
func TestSafeCastFloat64(t *testing.T) { func TestSafeCastFloat64(t *testing.T) {
type args struct { type args struct {
value any value interface{}
} }
tests := []struct { tests := []struct {
name string name string
@ -342,7 +342,7 @@ func TestTrimLower(t *testing.T) {
func TestGetStrValueOfAnyType(t *testing.T) { func TestGetStrValueOfAnyType(t *testing.T) {
type args struct { type args struct {
value any value interface{}
} }
tests := []struct { tests := []struct {
name string name string
@ -357,7 +357,7 @@ func TestGetStrValueOfAnyType(t *testing.T) {
{"string", args{"hello world"}, "hello world"}, {"string", args{"hello world"}, "hello world"},
{"bool", args{true}, "true"}, {"bool", args{true}, "true"},
{"bool", args{false}, "false"}, {"bool", args{false}, "false"},
{"map", args{map[string]any{"key1": "value1"}}, "{\"key1\":\"value1\"}"}, {"map", args{map[string]interface{}{"key1": "value1"}}, "{\"key1\":\"value1\"}"},
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {

View File

@ -66,7 +66,8 @@ func parseV1alpha1SkipList(artifact *artifact.Artifact, manifest *v1.Manifest) {
skipListAnnotationKey := fmt.Sprintf("%s.%s.%s", AnnotationPrefix, V1alpha1, SkipList) skipListAnnotationKey := fmt.Sprintf("%s.%s.%s", AnnotationPrefix, V1alpha1, SkipList)
skipList, ok := manifest.Config.Annotations[skipListAnnotationKey] skipList, ok := manifest.Config.Annotations[skipListAnnotationKey]
if ok { if ok {
for skipKey := range strings.SplitSeq(skipList, ",") { skipKeyList := strings.Split(skipList, ",")
for _, skipKey := range skipKeyList {
delete(metadata, skipKey) delete(metadata, skipKey)
} }
artifact.ExtraAttrs = metadata artifact.ExtraAttrs = metadata

View File

@ -231,7 +231,7 @@ func (p *v1alpha1TestSuite) TestParse() {
manifestMediaType, content, err := manifest.Payload() manifestMediaType, content, err := manifest.Payload()
p.Require().Nil(err) p.Require().Nil(err)
metadata := map[string]any{} metadata := map[string]interface{}{}
configBlob := io.NopCloser(strings.NewReader(ormbConfig)) configBlob := io.NopCloser(strings.NewReader(ormbConfig))
err = json.NewDecoder(configBlob).Decode(&metadata) err = json.NewDecoder(configBlob).Decode(&metadata)
p.Require().Nil(err) p.Require().Nil(err)
@ -244,7 +244,7 @@ func (p *v1alpha1TestSuite) TestParse() {
p.Len(art.ExtraAttrs, 12) p.Len(art.ExtraAttrs, 12)
p.Equal("CNN Model", art.ExtraAttrs["description"]) p.Equal("CNN Model", art.ExtraAttrs["description"])
p.Equal("TensorFlow", art.ExtraAttrs["framework"]) p.Equal("TensorFlow", art.ExtraAttrs["framework"])
p.Equal([]any{map[string]any{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"]) p.Equal([]interface{}{map[string]interface{}{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"])
p.Equal("sha256:d923b93eadde0af5c639a972710a4d919066aba5d0dfbf4b9385099f70272da0", art.Icon) p.Equal("sha256:d923b93eadde0af5c639a972710a4d919066aba5d0dfbf4b9385099f70272da0", art.Icon)
// ormbManifestWithoutSkipList // ormbManifestWithoutSkipList
@ -255,7 +255,7 @@ func (p *v1alpha1TestSuite) TestParse() {
manifestMediaType, content, err = manifest.Payload() manifestMediaType, content, err = manifest.Payload()
p.Require().Nil(err) p.Require().Nil(err)
metadata = map[string]any{} metadata = map[string]interface{}{}
configBlob = io.NopCloser(strings.NewReader(ormbConfig)) configBlob = io.NopCloser(strings.NewReader(ormbConfig))
err = json.NewDecoder(configBlob).Decode(&metadata) err = json.NewDecoder(configBlob).Decode(&metadata)
p.Require().Nil(err) p.Require().Nil(err)
@ -268,7 +268,7 @@ func (p *v1alpha1TestSuite) TestParse() {
p.Len(art.ExtraAttrs, 13) p.Len(art.ExtraAttrs, 13)
p.Equal("CNN Model", art.ExtraAttrs["description"]) p.Equal("CNN Model", art.ExtraAttrs["description"])
p.Equal("TensorFlow", art.ExtraAttrs["framework"]) p.Equal("TensorFlow", art.ExtraAttrs["framework"])
p.Equal([]any{map[string]any{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"]) p.Equal([]interface{}{map[string]interface{}{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"])
p.Equal("sha256:d923b93eadde0af5c639a972710a4d919066aba5d0dfbf4b9385099f70272da0", art.Icon) p.Equal("sha256:d923b93eadde0af5c639a972710a4d919066aba5d0dfbf4b9385099f70272da0", art.Icon)
// ormbManifestWithoutIcon // ormbManifestWithoutIcon
@ -279,7 +279,7 @@ func (p *v1alpha1TestSuite) TestParse() {
manifestMediaType, content, err = manifest.Payload() manifestMediaType, content, err = manifest.Payload()
p.Require().Nil(err) p.Require().Nil(err)
metadata = map[string]any{} metadata = map[string]interface{}{}
configBlob = io.NopCloser(strings.NewReader(ormbConfig)) configBlob = io.NopCloser(strings.NewReader(ormbConfig))
err = json.NewDecoder(configBlob).Decode(&metadata) err = json.NewDecoder(configBlob).Decode(&metadata)
p.Require().Nil(err) p.Require().Nil(err)
@ -290,7 +290,7 @@ func (p *v1alpha1TestSuite) TestParse() {
p.Len(art.ExtraAttrs, 12) p.Len(art.ExtraAttrs, 12)
p.Equal("CNN Model", art.ExtraAttrs["description"]) p.Equal("CNN Model", art.ExtraAttrs["description"])
p.Equal("TensorFlow", art.ExtraAttrs["framework"]) p.Equal("TensorFlow", art.ExtraAttrs["framework"])
p.Equal([]any{map[string]any{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"]) p.Equal([]interface{}{map[string]interface{}{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"])
p.Equal("", art.Icon) p.Equal("", art.Icon)
} }

View File

@ -313,7 +313,7 @@ func (c *controller) getByTag(ctx context.Context, repository, tag string, optio
return nil, err return nil, err
} }
tags, err := c.tagCtl.List(ctx, &q.Query{ tags, err := c.tagCtl.List(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"RepositoryID": repo.RepositoryID, "RepositoryID": repo.RepositoryID,
"Name": tag, "Name": tag,
}, },
@ -356,7 +356,7 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
return nil return nil
} }
parents, err := c.artMgr.ListReferences(ctx, &q.Query{ parents, err := c.artMgr.ListReferences(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"ChildID": id, "ChildID": id,
}, },
}) })
@ -385,7 +385,7 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
if acc.IsHard() { if acc.IsHard() {
// if this acc artifact has parent(is child), set isRoot to false // if this acc artifact has parent(is child), set isRoot to false
parents, err := c.artMgr.ListReferences(ctx, &q.Query{ parents, err := c.artMgr.ListReferences(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"ChildID": acc.GetData().ArtifactID, "ChildID": acc.GetData().ArtifactID,
}, },
}) })
@ -752,7 +752,7 @@ func (c *controller) populateIcon(art *Artifact) {
func (c *controller) populateTags(ctx context.Context, art *Artifact, option *tag.Option) { func (c *controller) populateTags(ctx context.Context, art *Artifact, option *tag.Option) {
tags, err := c.tagCtl.List(ctx, &q.Query{ tags, err := c.tagCtl.List(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"artifact_id": art.ID, "artifact_id": art.ID,
}, },
}, option) }, option)

View File

@ -56,7 +56,7 @@ func (suite *IteratorTestSuite) TeardownSuite() {
func (suite *IteratorTestSuite) TestIterator() { func (suite *IteratorTestSuite) TestIterator() {
suite.accMgr.On("List", mock.Anything, mock.Anything).Return([]accessorymodel.Accessory{}, nil) suite.accMgr.On("List", mock.Anything, mock.Anything).Return([]accessorymodel.Accessory{}, nil)
q1 := &q.Query{PageNumber: 1, PageSize: 5, Keywords: map[string]any{}} q1 := &q.Query{PageNumber: 1, PageSize: 5, Keywords: map[string]interface{}{}}
suite.artMgr.On("List", mock.Anything, q1).Return([]*artifact.Artifact{ suite.artMgr.On("List", mock.Anything, q1).Return([]*artifact.Artifact{
{ID: 1}, {ID: 1},
{ID: 2}, {ID: 2},
@ -65,7 +65,7 @@ func (suite *IteratorTestSuite) TestIterator() {
{ID: 5}, {ID: 5},
}, nil) }, nil)
q2 := &q.Query{PageNumber: 2, PageSize: 5, Keywords: map[string]any{}} q2 := &q.Query{PageNumber: 2, PageSize: 5, Keywords: map[string]interface{}{}}
suite.artMgr.On("List", mock.Anything, q2).Return([]*artifact.Artifact{ suite.artMgr.On("List", mock.Anything, q2).Return([]*artifact.Artifact{
{ID: 6}, {ID: 6},
{ID: 7}, {ID: 7},

View File

@ -40,7 +40,7 @@ func (artifact *Artifact) UnmarshalJSON(data []byte) error {
type Alias Artifact type Alias Artifact
ali := &struct { ali := &struct {
*Alias *Alias
AccessoryItems []any `json:"accessories,omitempty"` AccessoryItems []interface{} `json:"accessories,omitempty"`
}{ }{
Alias: (*Alias)(artifact), Alias: (*Alias)(artifact),
} }

View File

@ -44,7 +44,7 @@ type ManifestProcessor struct {
// AbstractMetadata abstracts metadata of artifact // AbstractMetadata abstracts metadata of artifact
func (m *ManifestProcessor) AbstractMetadata(ctx context.Context, artifact *artifact.Artifact, content []byte) error { func (m *ManifestProcessor) AbstractMetadata(ctx context.Context, artifact *artifact.Artifact, content []byte) error {
// parse metadata from config layer // parse metadata from config layer
metadata := map[string]any{} metadata := map[string]interface{}{}
if err := m.UnmarshalConfig(ctx, artifact.RepositoryName, content, &metadata); err != nil { if err := m.UnmarshalConfig(ctx, artifact.RepositoryName, content, &metadata); err != nil {
return err return err
} }
@ -55,7 +55,7 @@ func (m *ManifestProcessor) AbstractMetadata(ctx context.Context, artifact *arti
} }
if artifact.ExtraAttrs == nil { if artifact.ExtraAttrs == nil {
artifact.ExtraAttrs = map[string]any{} artifact.ExtraAttrs = map[string]interface{}{}
} }
for _, property := range m.properties { for _, property := range m.properties {
artifact.ExtraAttrs[property] = metadata[property] artifact.ExtraAttrs[property] = metadata[property]
@ -80,7 +80,7 @@ func (m *ManifestProcessor) ListAdditionTypes(_ context.Context, _ *artifact.Art
} }
// UnmarshalConfig unmarshal the config blob of the artifact into the specified object "v" // UnmarshalConfig unmarshal the config blob of the artifact into the specified object "v"
func (m *ManifestProcessor) UnmarshalConfig(_ context.Context, repository string, manifest []byte, v any) error { func (m *ManifestProcessor) UnmarshalConfig(_ context.Context, repository string, manifest []byte, v interface{}) error {
// unmarshal manifest // unmarshal manifest
mani := &v1.Manifest{} mani := &v1.Manifest{}
if err := json.Unmarshal(manifest, mani); err != nil { if err := json.Unmarshal(manifest, mani); err != nil {

View File

@ -89,7 +89,7 @@ func (p *processorTestSuite) TestAbstractAddition() {
Repository: "github.com/goharbor", Repository: "github.com/goharbor",
}, },
}, },
Values: map[string]any{ Values: map[string]interface{}{
"cluster.enable": true, "cluster.enable": true,
"cluster.slaveCount": 1, "cluster.slaveCount": 1,
"image.pullPolicy": "Always", "image.pullPolicy": "Always",

View File

@ -17,8 +17,6 @@ package parser
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"path/filepath"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -42,11 +40,6 @@ const (
// defaultFileSizeLimit is the default file size limit. // defaultFileSizeLimit is the default file size limit.
defaultFileSizeLimit = 1024 * 1024 * 4 // 4MB defaultFileSizeLimit = 1024 * 1024 * 4 // 4MB
// formatTar is the format of tar file.
formatTar = ".tar"
// formatRaw is the format of raw file.
formatRaw = ".raw"
) )
// newBase creates a new base parser. // newBase creates a new base parser.
@ -77,23 +70,10 @@ func (b *base) Parse(_ context.Context, artifact *artifact.Artifact, layer *ocis
} }
defer stream.Close() defer stream.Close()
content, err := untar(stream)
content, err := decodeContent(layer.MediaType, stream)
if err != nil { if err != nil {
return "", nil, fmt.Errorf("failed to decode content: %w", err) return "", nil, fmt.Errorf("failed to untar the content: %w", err)
} }
return contentTypeTextPlain, content, nil return contentTypeTextPlain, content, nil
} }
func decodeContent(mediaType string, reader io.Reader) ([]byte, error) {
format := filepath.Ext(mediaType)
switch format {
case formatTar:
return untar(reader)
case formatRaw:
return io.ReadAll(reader)
default:
return nil, fmt.Errorf("unsupported format: %s", format)
}
}

View File

@ -63,10 +63,9 @@ func TestBaseParse(t *testing.T) {
expectedError: "failed to pull blob from registry: registry error", expectedError: "failed to pull blob from registry: registry error",
}, },
{ {
name: "successful parse (tar format)", name: "successful parse",
artifact: &artifact.Artifact{RepositoryName: "test/repo"}, artifact: &artifact.Artifact{RepositoryName: "test/repo"},
layer: &v1.Descriptor{ layer: &v1.Descriptor{
MediaType: "vnd.foo.bar.tar",
Digest: "sha256:1234", Digest: "sha256:1234",
}, },
mockSetup: func(m *mock.Client) { mockSetup: func(m *mock.Client) {
@ -83,34 +82,6 @@ func TestBaseParse(t *testing.T) {
}, },
expectedType: contentTypeTextPlain, expectedType: contentTypeTextPlain,
}, },
{
name: "successful parse (raw format)",
artifact: &artifact.Artifact{RepositoryName: "test/repo"},
layer: &v1.Descriptor{
MediaType: "vnd.foo.bar.raw",
Digest: "sha256:1234",
},
mockSetup: func(m *mock.Client) {
var buf bytes.Buffer
buf.Write([]byte("test content"))
m.On("PullBlob", "test/repo", "sha256:1234").Return(int64(0), io.NopCloser(bytes.NewReader(buf.Bytes())), nil)
},
expectedType: contentTypeTextPlain,
},
{
name: "error parse (unsupported format)",
artifact: &artifact.Artifact{RepositoryName: "test/repo"},
layer: &v1.Descriptor{
MediaType: "vnd.foo.bar.unknown",
Digest: "sha256:1234",
},
mockSetup: func(m *mock.Client) {
var buf bytes.Buffer
buf.Write([]byte("test content"))
m.On("PullBlob", "test/repo", "sha256:1234").Return(int64(0), io.NopCloser(bytes.NewReader(buf.Bytes())), nil)
},
expectedError: "failed to decode content: unsupported format: .unknown",
},
} }
for _, tt := range tests { for _, tt := range tests {

View File

@ -17,7 +17,6 @@ package parser
import ( import (
"context" "context"
"fmt" "fmt"
"slices"
modelspec "github.com/CloudNativeAI/model-spec/specs-go/v1" modelspec "github.com/CloudNativeAI/model-spec/specs-go/v1"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -48,10 +47,7 @@ func (l *license) Parse(ctx context.Context, artifact *artifact.Artifact, manife
// lookup the license file layer // lookup the license file layer
var layer *ocispec.Descriptor var layer *ocispec.Descriptor
for _, desc := range manifest.Layers { for _, desc := range manifest.Layers {
if slices.Contains([]string{ if desc.MediaType == modelspec.MediaTypeModelDoc {
modelspec.MediaTypeModelDoc,
modelspec.MediaTypeModelDocRaw,
}, desc.MediaType) {
if desc.Annotations != nil { if desc.Annotations != nil {
filepath := desc.Annotations[modelspec.AnnotationFilepath] filepath := desc.Annotations[modelspec.AnnotationFilepath]
if filepath == "LICENSE" || filepath == "LICENSE.txt" { if filepath == "LICENSE" || filepath == "LICENSE.txt" {

View File

@ -83,29 +83,6 @@ func TestLicenseParser(t *testing.T) {
expectedType: contentTypeTextPlain, expectedType: contentTypeTextPlain,
expectedOutput: []byte("MIT License"), expectedOutput: []byte("MIT License"),
}, },
{
name: "LICENSE parse success (raw)",
manifest: &ocispec.Manifest{
Layers: []ocispec.Descriptor{
{
MediaType: modelspec.MediaTypeModelDocRaw,
Annotations: map[string]string{
modelspec.AnnotationFilepath: "LICENSE",
},
Digest: "sha256:abc123",
},
},
},
setupMockReg: func(mc *mockregistry.Client) {
var buf bytes.Buffer
buf.Write([]byte("MIT License"))
mc.On("PullBlob", mock.Anything, "sha256:abc123").
Return(int64(buf.Len()), io.NopCloser(bytes.NewReader(buf.Bytes())), nil)
},
expectedType: contentTypeTextPlain,
expectedOutput: []byte("MIT License"),
},
{ {
name: "LICENSE.txt parse success", name: "LICENSE.txt parse success",
manifest: &ocispec.Manifest{ manifest: &ocispec.Manifest{

View File

@ -17,7 +17,6 @@ package parser
import ( import (
"context" "context"
"fmt" "fmt"
"slices"
modelspec "github.com/CloudNativeAI/model-spec/specs-go/v1" modelspec "github.com/CloudNativeAI/model-spec/specs-go/v1"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -48,10 +47,7 @@ func (r *readme) Parse(ctx context.Context, artifact *artifact.Artifact, manifes
// lookup the readme file layer. // lookup the readme file layer.
var layer *ocispec.Descriptor var layer *ocispec.Descriptor
for _, desc := range manifest.Layers { for _, desc := range manifest.Layers {
if slices.Contains([]string{ if desc.MediaType == modelspec.MediaTypeModelDoc {
modelspec.MediaTypeModelDoc,
modelspec.MediaTypeModelDocRaw,
}, desc.MediaType) {
if desc.Annotations != nil { if desc.Annotations != nil {
filepath := desc.Annotations[modelspec.AnnotationFilepath] filepath := desc.Annotations[modelspec.AnnotationFilepath]
if filepath == "README" || filepath == "README.md" { if filepath == "README" || filepath == "README.md" {

View File

@ -113,29 +113,6 @@ func TestReadmeParser(t *testing.T) {
expectedType: contentTypeMarkdown, expectedType: contentTypeMarkdown,
expectedOutput: []byte("# Test README"), expectedOutput: []byte("# Test README"),
}, },
{
name: "README parse success (raw)",
manifest: &ocispec.Manifest{
Layers: []ocispec.Descriptor{
{
MediaType: modelspec.MediaTypeModelDocRaw,
Annotations: map[string]string{
modelspec.AnnotationFilepath: "README",
},
Digest: "sha256:def456",
},
},
},
setupMockReg: func(mc *mockregistry.Client) {
var buf bytes.Buffer
buf.Write([]byte("# Test README"))
mc.On("PullBlob", mock.Anything, "sha256:def456").
Return(int64(buf.Len()), io.NopCloser(bytes.NewReader(buf.Bytes())), nil)
},
expectedType: contentTypeMarkdown,
expectedOutput: []byte("# Test README"),
},
{ {
name: "registry error", name: "registry error",
manifest: &ocispec.Manifest{ manifest: &ocispec.Manifest{

View File

@ -156,7 +156,7 @@ func TestAddNode(t *testing.T) {
// Verify the path exists. // Verify the path exists.
current := root current := root
parts := filepath.Clean(tt.path) parts := filepath.Clean(tt.path)
for part := range strings.SplitSeq(parts, string(filepath.Separator)) { for _, part := range strings.Split(parts, string(filepath.Separator)) {
if part == "" { if part == "" {
continue continue
} }

View File

@ -110,7 +110,7 @@ func (d *defaultProcessor) AbstractMetadata(ctx context.Context, artifact *artif
} }
defer blob.Close() defer blob.Close()
// parse metadata from config layer // parse metadata from config layer
metadata := map[string]any{} metadata := map[string]interface{}{}
if err = json.NewDecoder(blob).Decode(&metadata); err != nil { if err = json.NewDecoder(blob).Decode(&metadata); err != nil {
return err return err
} }

View File

@ -268,7 +268,7 @@ func (d *defaultProcessorTestSuite) TestAbstractMetadata() {
manifestMediaType, content, err := manifest.Payload() manifestMediaType, content, err := manifest.Payload()
d.Require().Nil(err) d.Require().Nil(err)
metadata := map[string]any{} metadata := map[string]interface{}{}
configBlob := io.NopCloser(strings.NewReader(ormbConfig)) configBlob := io.NopCloser(strings.NewReader(ormbConfig))
err = json.NewDecoder(configBlob).Decode(&metadata) err = json.NewDecoder(configBlob).Decode(&metadata)
d.Require().Nil(err) d.Require().Nil(err)
@ -289,7 +289,7 @@ func (d *defaultProcessorTestSuite) TestAbstractMetadataOfOCIManifesttWithUnknow
d.Require().Nil(err) d.Require().Nil(err)
configBlob := io.NopCloser(strings.NewReader(UnknownJsonConfig)) configBlob := io.NopCloser(strings.NewReader(UnknownJsonConfig))
metadata := map[string]any{} metadata := map[string]interface{}{}
err = json.NewDecoder(configBlob).Decode(&metadata) err = json.NewDecoder(configBlob).Decode(&metadata)
d.Require().Nil(err) d.Require().Nil(err)

View File

@ -44,7 +44,7 @@ func (m *manifestV1Processor) AbstractMetadata(_ context.Context, artifact *arti
return err return err
} }
if artifact.ExtraAttrs == nil { if artifact.ExtraAttrs == nil {
artifact.ExtraAttrs = map[string]any{} artifact.ExtraAttrs = map[string]interface{}{}
} }
artifact.ExtraAttrs["architecture"] = mani.Architecture artifact.ExtraAttrs["architecture"] = mani.Architecture
return nil return nil

View File

@ -59,7 +59,7 @@ func (m *manifestV2Processor) AbstractMetadata(ctx context.Context, artifact *ar
return err return err
} }
if artifact.ExtraAttrs == nil { if artifact.ExtraAttrs == nil {
artifact.ExtraAttrs = map[string]any{} artifact.ExtraAttrs = map[string]interface{}{}
} }
artifact.ExtraAttrs["created"] = config.Created artifact.ExtraAttrs["created"] = config.Created
artifact.ExtraAttrs["architecture"] = config.Architecture artifact.ExtraAttrs["architecture"] = config.Architecture

View File

@ -62,14 +62,14 @@ type Processor struct {
} }
func (m *Processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact, manifestBody []byte) error { func (m *Processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact, manifestBody []byte) error {
art.ExtraAttrs = map[string]any{} art.ExtraAttrs = map[string]interface{}{}
manifest := &v1.Manifest{} manifest := &v1.Manifest{}
if err := json.Unmarshal(manifestBody, manifest); err != nil { if err := json.Unmarshal(manifestBody, manifest); err != nil {
return err return err
} }
if art.ExtraAttrs == nil { if art.ExtraAttrs == nil {
art.ExtraAttrs = map[string]any{} art.ExtraAttrs = map[string]interface{}{}
} }
if manifest.Annotations[AnnotationVariantKey] == AnnotationVariantValue || manifest.Annotations[AnnotationHandlerKey] == AnnotationHandlerValue { if manifest.Annotations[AnnotationVariantKey] == AnnotationVariantValue || manifest.Annotations[AnnotationHandlerKey] == AnnotationHandlerValue {
// for annotation way // for annotation way

View File

@ -225,10 +225,10 @@ func (c *controller) Get(ctx context.Context, digest string, options ...Option)
opts := newOptions(options...) opts := newOptions(options...)
keywords := make(map[string]any) keywords := make(map[string]interface{})
if digest != "" { if digest != "" {
ol := q.OrList{ ol := q.OrList{
Values: []any{ Values: []interface{}{
digest, digest,
}, },
} }

View File

@ -232,7 +232,7 @@ func (suite *ControllerTestSuite) TestGet() {
func (suite *ControllerTestSuite) TestSync() { func (suite *ControllerTestSuite) TestSync() {
var references []distribution.Descriptor var references []distribution.Descriptor
for i := range 5 { for i := 0; i < 5; i++ {
references = append(references, distribution.Descriptor{ references = append(references, distribution.Descriptor{
MediaType: fmt.Sprintf("media type %d", i), MediaType: fmt.Sprintf("media type %d", i),
Digest: suite.Digest(), Digest: suite.Digest(),

View File

@ -46,11 +46,11 @@ type Controller interface {
// UserConfigs get the user scope configurations // UserConfigs get the user scope configurations
UserConfigs(ctx context.Context) (map[string]*models.Value, error) UserConfigs(ctx context.Context) (map[string]*models.Value, error)
// UpdateUserConfigs update the user scope configurations // UpdateUserConfigs update the user scope configurations
UpdateUserConfigs(ctx context.Context, conf map[string]any) error UpdateUserConfigs(ctx context.Context, conf map[string]interface{}) error
// AllConfigs get all configurations, used by internal, should include the system config items // AllConfigs get all configurations, used by internal, should include the system config items
AllConfigs(ctx context.Context) (map[string]any, error) AllConfigs(ctx context.Context) (map[string]interface{}, error)
// ConvertForGet - delete sensitive attrs and add editable field to every attr // ConvertForGet - delete sensitive attrs and add editable field to every attr
ConvertForGet(ctx context.Context, cfg map[string]any, internal bool) (map[string]*models.Value, error) ConvertForGet(ctx context.Context, cfg map[string]interface{}, internal bool) (map[string]*models.Value, error)
// OverwriteConfig overwrite config in the database and set all configure read only when CONFIG_OVERWRITE_JSON is provided // OverwriteConfig overwrite config in the database and set all configure read only when CONFIG_OVERWRITE_JSON is provided
OverwriteConfig(ctx context.Context) error OverwriteConfig(ctx context.Context) error
} }
@ -70,13 +70,13 @@ func (c *controller) UserConfigs(ctx context.Context) (map[string]*models.Value,
return c.ConvertForGet(ctx, configs, false) return c.ConvertForGet(ctx, configs, false)
} }
func (c *controller) AllConfigs(ctx context.Context) (map[string]any, error) { func (c *controller) AllConfigs(ctx context.Context) (map[string]interface{}, error) {
mgr := config.GetCfgManager(ctx) mgr := config.GetCfgManager(ctx)
configs := mgr.GetAll(ctx) configs := mgr.GetAll(ctx)
return configs, nil return configs, nil
} }
func (c *controller) UpdateUserConfigs(ctx context.Context, conf map[string]any) error { func (c *controller) UpdateUserConfigs(ctx context.Context, conf map[string]interface{}) error {
if readOnlyForAll { if readOnlyForAll {
return errors.ForbiddenError(nil).WithMessage("current config is init by env variable: CONFIG_OVERWRITE_JSON, it cannot be updated") return errors.ForbiddenError(nil).WithMessage("current config is init by env variable: CONFIG_OVERWRITE_JSON, it cannot be updated")
} }
@ -97,7 +97,7 @@ func (c *controller) UpdateUserConfigs(ctx context.Context, conf map[string]any)
return c.updateLogEndpoint(ctx, conf) return c.updateLogEndpoint(ctx, conf)
} }
func (c *controller) updateLogEndpoint(ctx context.Context, cfgs map[string]any) error { func (c *controller) updateLogEndpoint(ctx context.Context, cfgs map[string]interface{}) error {
// check if the audit log forward endpoint updated // check if the audit log forward endpoint updated
if _, ok := cfgs[common.AuditLogForwardEndpoint]; ok { if _, ok := cfgs[common.AuditLogForwardEndpoint]; ok {
auditEP := config.AuditLogForwardEndpoint(ctx) auditEP := config.AuditLogForwardEndpoint(ctx)
@ -112,7 +112,7 @@ func (c *controller) updateLogEndpoint(ctx context.Context, cfgs map[string]any)
return nil return nil
} }
func (c *controller) validateCfg(ctx context.Context, cfgs map[string]any) error { func (c *controller) validateCfg(ctx context.Context, cfgs map[string]interface{}) error {
mgr := config.GetCfgManager(ctx) mgr := config.GetCfgManager(ctx)
// check if auth can be modified // check if auth can be modified
@ -146,7 +146,7 @@ func (c *controller) validateCfg(ctx context.Context, cfgs map[string]any) error
return nil return nil
} }
func verifySkipAuditLogCfg(ctx context.Context, cfgs map[string]any, mgr config.Manager) error { func verifySkipAuditLogCfg(ctx context.Context, cfgs map[string]interface{}, mgr config.Manager) error {
updated := false updated := false
endPoint := mgr.Get(ctx, common.AuditLogForwardEndpoint).GetString() endPoint := mgr.Get(ctx, common.AuditLogForwardEndpoint).GetString()
skipAuditDB := mgr.Get(ctx, common.SkipAuditLogDatabase).GetBool() skipAuditDB := mgr.Get(ctx, common.SkipAuditLogDatabase).GetBool()
@ -169,7 +169,7 @@ func verifySkipAuditLogCfg(ctx context.Context, cfgs map[string]any, mgr config.
} }
// verifyValueLengthCfg verifies the cfgs which need to check the value max length to align with frontend. // verifyValueLengthCfg verifies the cfgs which need to check the value max length to align with frontend.
func verifyValueLengthCfg(_ context.Context, cfgs map[string]any) error { func verifyValueLengthCfg(_ context.Context, cfgs map[string]interface{}) error {
maxValue := maxValueLimitedByLength(common.UIMaxLengthLimitedOfNumber) maxValue := maxValueLimitedByLength(common.UIMaxLengthLimitedOfNumber)
validateCfgs := []string{ validateCfgs := []string{
common.TokenExpiration, common.TokenExpiration,
@ -206,7 +206,7 @@ func maxValueLimitedByLength(length int) int64 {
var value int64 var value int64
// the times for multiple, should *10 for every time // the times for multiple, should *10 for every time
times := 1 times := 1
for range length { for i := 0; i < length; i++ {
value = value + int64(9*times) value = value + int64(9*times)
times = times * 10 times = times * 10
} }
@ -218,10 +218,10 @@ func maxValueLimitedByLength(length int) int64 {
// Only for migrating from the legacy schedule. // Only for migrating from the legacy schedule.
type ScanAllPolicy struct { type ScanAllPolicy struct {
Type string `json:"type"` Type string `json:"type"`
Param map[string]any `json:"parameter,omitempty"` Param map[string]interface{} `json:"parameter,omitempty"`
} }
func (c *controller) ConvertForGet(ctx context.Context, cfg map[string]any, internal bool) (map[string]*models.Value, error) { func (c *controller) ConvertForGet(ctx context.Context, cfg map[string]interface{}, internal bool) (map[string]*models.Value, error) {
result := map[string]*models.Value{} result := map[string]*models.Value{}
mList := metadata.Instance().GetAll() mList := metadata.Instance().GetAll()
@ -270,7 +270,7 @@ func (c *controller) ConvertForGet(ctx context.Context, cfg map[string]any, inte
} }
func (c *controller) OverwriteConfig(ctx context.Context) error { func (c *controller) OverwriteConfig(ctx context.Context) error {
cfgMap := map[string]any{} cfgMap := map[string]interface{}{}
if v, ok := os.LookupEnv(configOverwriteJSON); ok { if v, ok := os.LookupEnv(configOverwriteJSON); ok {
err := json.Unmarshal([]byte(v), &cfgMap) err := json.Unmarshal([]byte(v), &cfgMap)
if err != nil { if err != nil {

View File

@ -33,7 +33,7 @@ func Test_verifySkipAuditLogCfg(t *testing.T) {
Return(&metadata.ConfigureValue{Name: common.SkipAuditLogDatabase, Value: "true"}) Return(&metadata.ConfigureValue{Name: common.SkipAuditLogDatabase, Value: "true"})
type args struct { type args struct {
ctx context.Context ctx context.Context
cfgs map[string]any cfgs map[string]interface{}
mgr config.Manager mgr config.Manager
} }
tests := []struct { tests := []struct {
@ -42,17 +42,17 @@ func Test_verifySkipAuditLogCfg(t *testing.T) {
wantErr bool wantErr bool
}{ }{
{name: "both configured", args: args{ctx: context.TODO(), {name: "both configured", args: args{ctx: context.TODO(),
cfgs: map[string]any{common.AuditLogForwardEndpoint: "harbor-log:15041", cfgs: map[string]interface{}{common.AuditLogForwardEndpoint: "harbor-log:15041",
common.SkipAuditLogDatabase: true}, common.SkipAuditLogDatabase: true},
mgr: cfgManager}, wantErr: false}, mgr: cfgManager}, wantErr: false},
{name: "no forward endpoint config", args: args{ctx: context.TODO(), {name: "no forward endpoint config", args: args{ctx: context.TODO(),
cfgs: map[string]any{common.SkipAuditLogDatabase: true}, cfgs: map[string]interface{}{common.SkipAuditLogDatabase: true},
mgr: cfgManager}, wantErr: true}, mgr: cfgManager}, wantErr: true},
{name: "none configured", args: args{ctx: context.TODO(), {name: "none configured", args: args{ctx: context.TODO(),
cfgs: map[string]any{}, cfgs: map[string]interface{}{},
mgr: cfgManager}, wantErr: false}, mgr: cfgManager}, wantErr: false},
{name: "enabled skip audit log database, but change log forward endpoint to empty", args: args{ctx: context.TODO(), {name: "enabled skip audit log database, but change log forward endpoint to empty", args: args{ctx: context.TODO(),
cfgs: map[string]any{common.AuditLogForwardEndpoint: ""}, cfgs: map[string]interface{}{common.AuditLogForwardEndpoint: ""},
mgr: cfgManager}, wantErr: true}, mgr: cfgManager}, wantErr: true},
} }
for _, tt := range tests { for _, tt := range tests {
@ -89,24 +89,24 @@ func Test_maxValueLimitedByLength(t *testing.T) {
func Test_verifyValueLengthCfg(t *testing.T) { func Test_verifyValueLengthCfg(t *testing.T) {
type args struct { type args struct {
ctx context.Context ctx context.Context
cfgs map[string]any cfgs map[string]interface{}
} }
tests := []struct { tests := []struct {
name string name string
args args args args
wantErr bool wantErr bool
}{ }{
{name: "valid config", args: args{context.TODO(), map[string]any{ {name: "valid config", args: args{context.TODO(), map[string]interface{}{
common.TokenExpiration: float64(100), common.TokenExpiration: float64(100),
common.RobotTokenDuration: float64(100), common.RobotTokenDuration: float64(100),
common.SessionTimeout: float64(100), common.SessionTimeout: float64(100),
}}, wantErr: false}, }}, wantErr: false},
{name: "invalid config with negative value", args: args{context.TODO(), map[string]any{ {name: "invalid config with negative value", args: args{context.TODO(), map[string]interface{}{
common.TokenExpiration: float64(-1), common.TokenExpiration: float64(-1),
common.RobotTokenDuration: float64(100), common.RobotTokenDuration: float64(100),
common.SessionTimeout: float64(100), common.SessionTimeout: float64(100),
}}, wantErr: true}, }}, wantErr: true},
{name: "invalid config with value over length limit", args: args{context.TODO(), map[string]any{ {name: "invalid config with value over length limit", args: args{context.TODO(), map[string]interface{}{
common.TokenExpiration: float64(100), common.TokenExpiration: float64(100),
common.RobotTokenDuration: float64(100000000000000000), common.RobotTokenDuration: float64(100000000000000000),
common.SessionTimeout: float64(100), common.SessionTimeout: float64(100),

View File

@ -28,12 +28,12 @@ import (
htesting "github.com/goharbor/harbor/src/testing" htesting "github.com/goharbor/harbor/src/testing"
) )
var TestDBConfig = map[string]any{ var TestDBConfig = map[string]interface{}{
common.LDAPBaseDN: "dc=example,dc=com", common.LDAPBaseDN: "dc=example,dc=com",
common.LDAPURL: "ldap.example.com", common.LDAPURL: "ldap.example.com",
} }
var TestConfigWithScanAll = map[string]any{ var TestConfigWithScanAll = map[string]interface{}{
"postgresql_host": "localhost", "postgresql_host": "localhost",
"postgresql_database": "registry", "postgresql_database": "registry",
"postgresql_password": "root123", "postgresql_password": "root123",
@ -67,7 +67,7 @@ func (c *controllerTestSuite) TestGetUserCfg() {
} }
func (c *controllerTestSuite) TestConvertForGet() { func (c *controllerTestSuite) TestConvertForGet() {
conf := map[string]any{ conf := map[string]interface{}{
"ldap_url": "ldaps.myexample,com", "ldap_url": "ldaps.myexample,com",
"ldap_base_dn": "dc=myexample,dc=com", "ldap_base_dn": "dc=myexample,dc=com",
"auth_mode": "ldap_auth", "auth_mode": "ldap_auth",
@ -83,7 +83,7 @@ func (c *controllerTestSuite) TestConvertForGet() {
c.False(exist) c.False(exist)
// password type should be sent to internal api call // password type should be sent to internal api call
conf2 := map[string]any{ conf2 := map[string]interface{}{
"ldap_url": "ldaps.myexample,com", "ldap_url": "ldaps.myexample,com",
"ldap_base_dn": "dc=myexample,dc=com", "ldap_base_dn": "dc=myexample,dc=com",
"auth_mode": "ldap_auth", "auth_mode": "ldap_auth",
@ -109,7 +109,7 @@ func (c *controllerTestSuite) TestGetAll() {
func (c *controllerTestSuite) TestUpdateUserCfg() { func (c *controllerTestSuite) TestUpdateUserCfg() {
userConf := map[string]any{ userConf := map[string]interface{}{
common.LDAPURL: "ldaps.myexample,com", common.LDAPURL: "ldaps.myexample,com",
common.LDAPBaseDN: "dc=myexample,dc=com", common.LDAPBaseDN: "dc=myexample,dc=com",
} }
@ -121,7 +121,7 @@ func (c *controllerTestSuite) TestUpdateUserCfg() {
} }
c.Equal("dc=myexample,dc=com", cfgResp["ldap_base_dn"].Val) c.Equal("dc=myexample,dc=com", cfgResp["ldap_base_dn"].Val)
c.Equal("ldaps.myexample,com", cfgResp["ldap_url"].Val) c.Equal("ldaps.myexample,com", cfgResp["ldap_url"].Val)
badCfg := map[string]any{ badCfg := map[string]interface{}{
common.LDAPScope: 5, common.LDAPScope: 5,
} }
err2 := c.controller.UpdateUserConfigs(ctx, badCfg) err2 := c.controller.UpdateUserConfigs(ctx, badCfg)
@ -130,7 +130,7 @@ func (c *controllerTestSuite) TestUpdateUserCfg() {
} }
/*func (c *controllerTestSuite) TestCheckUnmodifiable() { /*func (c *controllerTestSuite) TestCheckUnmodifiable() {
conf := map[string]any{ conf := map[string]interface{}{
"ldap_url": "ldaps.myexample,com", "ldap_url": "ldaps.myexample,com",
"ldap_base_dn": "dc=myexample,dc=com", "ldap_base_dn": "dc=myexample,dc=com",
"auth_mode": "ldap_auth", "auth_mode": "ldap_auth",

View File

@ -41,7 +41,7 @@ func (h *Handler) Name() string {
} }
// Handle ... // Handle ...
func (h *Handler) Handle(ctx context.Context, value any) error { func (h *Handler) Handle(ctx context.Context, value interface{}) error {
var addAuditLog bool var addAuditLog bool
switch v := value.(type) { switch v := value.(type) {
case *event.PushArtifactEvent, *event.DeleteArtifactEvent, case *event.PushArtifactEvent, *event.DeleteArtifactEvent,

View File

@ -99,7 +99,7 @@ func (a *ArtifactEventHandler) Name() string {
} }
// Handle ... // Handle ...
func (a *ArtifactEventHandler) Handle(ctx context.Context, value any) error { func (a *ArtifactEventHandler) Handle(ctx context.Context, value interface{}) error {
switch v := value.(type) { switch v := value.(type) {
case *event.PullArtifactEvent: case *event.PullArtifactEvent:
return a.onPull(ctx, v.ArtifactEvent) return a.onPull(ctx, v.ArtifactEvent)
@ -190,7 +190,7 @@ func (a *ArtifactEventHandler) syncFlushPullTime(ctx context.Context, artifactID
if tagName != "" { if tagName != "" {
tags, err := tag.Ctl.List(ctx, q.New( tags, err := tag.Ctl.List(ctx, q.New(
map[string]any{ map[string]interface{}{
"ArtifactID": artifactID, "ArtifactID": artifactID,
"Name": tagName, "Name": tagName,
}), nil) }), nil)

View File

@ -53,7 +53,7 @@ func (a *ProjectEventHandler) onProjectDelete(ctx context.Context, event *event.
} }
// Handle handle project event // Handle handle project event
func (a *ProjectEventHandler) Handle(ctx context.Context, value any) error { func (a *ProjectEventHandler) Handle(ctx context.Context, value interface{}) error {
switch v := value.(type) { switch v := value.(type) {
case *event.DeleteProjectEvent: case *event.DeleteProjectEvent:
return a.onProjectDelete(ctx, v) return a.onProjectDelete(ctx, v)

View File

@ -36,7 +36,7 @@ func (p *Handler) Name() string {
} }
// Handle ... // Handle ...
func (p *Handler) Handle(ctx context.Context, value any) error { func (p *Handler) Handle(ctx context.Context, value interface{}) error {
switch v := value.(type) { switch v := value.(type) {
case *event.PushArtifactEvent: case *event.PushArtifactEvent:
return p.handlePushArtifact(ctx, v) return p.handlePushArtifact(ctx, v)

View File

@ -82,7 +82,7 @@ func (suite *PreheatTestSuite) TestName() {
// TestHandle ... // TestHandle ...
func (suite *PreheatTestSuite) TestHandle() { func (suite *PreheatTestSuite) TestHandle() {
type args struct { type args struct {
data any data interface{}
} }
tests := []struct { tests := []struct {
name string name string

View File

@ -36,7 +36,7 @@ func (r *Handler) Name() string {
} }
// Handle ... // Handle ...
func (r *Handler) Handle(ctx context.Context, value any) error { func (r *Handler) Handle(ctx context.Context, value interface{}) error {
pushArtEvent, ok := value.(*event.PushArtifactEvent) pushArtEvent, ok := value.(*event.PushArtifactEvent)
if ok { if ok {
return r.handlePushArtifact(ctx, pushArtEvent) return r.handlePushArtifact(ctx, pushArtEvent)
@ -78,7 +78,7 @@ func (r *Handler) handlePushArtifact(ctx context.Context, event *event.PushArtif
Metadata: &model.ResourceMetadata{ Metadata: &model.ResourceMetadata{
Repository: &model.Repository{ Repository: &model.Repository{
Name: event.Repository, Name: event.Repository,
Metadata: map[string]any{ Metadata: map[string]interface{}{
"public": strconv.FormatBool(public), "public": strconv.FormatBool(public),
}, },
}, },
@ -138,7 +138,7 @@ func (r *Handler) handleCreateTag(ctx context.Context, event *event.CreateTagEve
Metadata: &model.ResourceMetadata{ Metadata: &model.ResourceMetadata{
Repository: &model.Repository{ Repository: &model.Repository{
Name: event.Repository, Name: event.Repository,
Metadata: map[string]any{ Metadata: map[string]interface{}{
"public": strconv.FormatBool(public), "public": strconv.FormatBool(public),
}, },
}, },

View File

@ -17,7 +17,7 @@ func TestMain(m *testing.M) {
} }
func TestBuildImageResourceURL(t *testing.T) { func TestBuildImageResourceURL(t *testing.T) {
cfg := map[string]any{ cfg := map[string]interface{}{
common.ExtEndpoint: "https://demo.goharbor.io", common.ExtEndpoint: "https://demo.goharbor.io",
} }
config.InitWithSettings(cfg) config.InitWithSettings(cfg)

View File

@ -39,7 +39,7 @@ func (a *Handler) Name() string {
} }
// Handle preprocess artifact event data and then publish hook event // Handle preprocess artifact event data and then publish hook event
func (a *Handler) Handle(ctx context.Context, value any) error { func (a *Handler) Handle(ctx context.Context, value interface{}) error {
if !config.NotificationEnable(ctx) { if !config.NotificationEnable(ctx) {
log.Debug("notification feature is not enabled") log.Debug("notification feature is not enabled")
return nil return nil

View File

@ -45,7 +45,7 @@ func (r *ReplicationHandler) Name() string {
} }
// Handle ... // Handle ...
func (r *ReplicationHandler) Handle(ctx context.Context, value any) error { func (r *ReplicationHandler) Handle(ctx context.Context, value interface{}) error {
if !config.NotificationEnable(ctx) { if !config.NotificationEnable(ctx) {
log.Debug("notification feature is not enabled") log.Debug("notification feature is not enabled")
return nil return nil

View File

@ -73,7 +73,7 @@ func TestReplicationHandler_Handle(t *testing.T) {
handler := &ReplicationHandler{} handler := &ReplicationHandler{}
type args struct { type args struct {
data any data interface{}
} }
tests := []struct { tests := []struct {
name string name string

View File

@ -40,7 +40,7 @@ func (r *RetentionHandler) Name() string {
} }
// Handle ... // Handle ...
func (r *RetentionHandler) Handle(ctx context.Context, value any) error { func (r *RetentionHandler) Handle(ctx context.Context, value interface{}) error {
if !config.NotificationEnable(ctx) { if !config.NotificationEnable(ctx) {
log.Debug("notification feature is not enabled") log.Debug("notification feature is not enabled")
return nil return nil

View File

@ -61,7 +61,7 @@ func TestRetentionHandler_Handle(t *testing.T) {
}, nil) }, nil)
type args struct { type args struct {
data any data interface{}
} }
tests := []struct { tests := []struct {
name string name string

View File

@ -38,7 +38,7 @@ func (qp *Handler) Name() string {
} }
// Handle ... // Handle ...
func (qp *Handler) Handle(ctx context.Context, value any) error { func (qp *Handler) Handle(ctx context.Context, value interface{}) error {
quotaEvent, ok := value.(*event.QuotaEvent) quotaEvent, ok := value.(*event.QuotaEvent)
if !ok { if !ok {
return errors.New("invalid quota event type") return errors.New("invalid quota event type")

View File

@ -53,7 +53,7 @@ func TestQuotaPreprocessHandler(t *testing.T) {
// SetupSuite prepares env for test suite. // SetupSuite prepares env for test suite.
func (suite *QuotaPreprocessHandlerSuite) SetupSuite() { func (suite *QuotaPreprocessHandlerSuite) SetupSuite() {
common_dao.PrepareTestForPostgresSQL() common_dao.PrepareTestForPostgresSQL()
cfg := map[string]any{ cfg := map[string]interface{}{
common.NotificationEnable: true, common.NotificationEnable: true,
} }
config.InitWithSettings(cfg) config.InitWithSettings(cfg)
@ -110,7 +110,7 @@ func (m *MockHandler) Name() string {
} }
// Handle ... // Handle ...
func (m *MockHandler) Handle(ctx context.Context, value any) error { func (m *MockHandler) Handle(ctx context.Context, value interface{}) error {
return nil return nil
} }

View File

@ -42,7 +42,7 @@ func (si *Handler) Name() string {
} }
// Handle preprocess chart event data and then publish hook event // Handle preprocess chart event data and then publish hook event
func (si *Handler) Handle(ctx context.Context, value any) error { func (si *Handler) Handle(ctx context.Context, value interface{}) error {
if value == nil { if value == nil {
return errors.New("empty scan artifact event") return errors.New("empty scan artifact event")
} }
@ -129,7 +129,7 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
// Wait for reasonable time to make sure the report is ready // Wait for reasonable time to make sure the report is ready
// Interval=500ms and total time = 5s // Interval=500ms and total time = 5s
// If the report is still not ready in the total time, then failed at then // If the report is still not ready in the total time, then failed at then
for range 10 { for i := 0; i < 10; i++ {
// First check in case it is ready // First check in case it is ready
if re, err := scan.DefaultController.GetReport(ctx, art, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport}); err == nil { if re, err := scan.DefaultController.GetReport(ctx, art, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport}); err == nil {
if len(re) > 0 && len(re[0].Report) > 0 { if len(re) > 0 && len(re[0].Report) > 0 {
@ -142,7 +142,7 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
time.Sleep(500 * time.Millisecond) time.Sleep(500 * time.Millisecond)
} }
scanSummaries := map[string]any{} scanSummaries := map[string]interface{}{}
if event.ScanType == v1.ScanTypeVulnerability { if event.ScanType == v1.ScanTypeVulnerability {
scanSummaries, err = scan.DefaultController.GetSummary(ctx, art, event.ScanType, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport}) scanSummaries, err = scan.DefaultController.GetSummary(ctx, art, event.ScanType, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport})
if err != nil { if err != nil {
@ -150,7 +150,7 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
} }
} }
sbomOverview := map[string]any{} sbomOverview := map[string]interface{}{}
if event.ScanType == v1.ScanTypeSbom { if event.ScanType == v1.ScanTypeSbom {
sbomOverview, err = scan.DefaultController.GetSummary(ctx, art, event.ScanType, []string{v1.MimeTypeSBOMReport}) sbomOverview, err = scan.DefaultController.GetSummary(ctx, art, event.ScanType, []string{v1.MimeTypeSBOMReport})
if err != nil { if err != nil {

View File

@ -63,7 +63,7 @@ func TestScanImagePreprocessHandler(t *testing.T) {
// SetupSuite prepares env for test suite. // SetupSuite prepares env for test suite.
func (suite *ScanImagePreprocessHandlerSuite) SetupSuite() { func (suite *ScanImagePreprocessHandlerSuite) SetupSuite() {
common_dao.PrepareTestForPostgresSQL() common_dao.PrepareTestForPostgresSQL()
cfg := map[string]any{ cfg := map[string]interface{}{
common.NotificationEnable: true, common.NotificationEnable: true,
} }
config.InitWithSettings(cfg) config.InitWithSettings(cfg)
@ -92,7 +92,7 @@ func (suite *ScanImagePreprocessHandlerSuite) SetupSuite() {
mc := &scantesting.Controller{} mc := &scantesting.Controller{}
var options []report.Option var options []report.Option
s := make(map[string]any) s := make(map[string]interface{})
mc.On("GetSummary", a, []string{v1.MimeTypeNativeReport}, options).Return(s, nil) mc.On("GetSummary", a, []string{v1.MimeTypeNativeReport}, options).Return(s, nil)
mock.OnAnything(mc, "GetSummary").Return(s, nil) mock.OnAnything(mc, "GetSummary").Return(s, nil)
mock.OnAnything(mc, "GetReport").Return(reports, nil) mock.OnAnything(mc, "GetReport").Return(reports, nil)
@ -153,7 +153,7 @@ func (m *MockHTTPHandler) Name() string {
} }
// Handle ... // Handle ...
func (m *MockHTTPHandler) Handle(ctx context.Context, value any) error { func (m *MockHTTPHandler) Handle(ctx context.Context, value interface{}) error {
return nil return nil
} }

View File

@ -33,7 +33,7 @@ type robotEventTestSuite struct {
} }
func (t *tagEventTestSuite) TestResolveOfCreateRobotEventMetadata() { func (t *tagEventTestSuite) TestResolveOfCreateRobotEventMetadata() {
cfg := map[string]any{ cfg := map[string]interface{}{
common.RobotPrefix: "robot$", common.RobotPrefix: "robot$",
} }
config.InitWithSettings(cfg) config.InitWithSettings(cfg)
@ -57,7 +57,7 @@ func (t *tagEventTestSuite) TestResolveOfCreateRobotEventMetadata() {
} }
func (t *tagEventTestSuite) TestResolveOfDeleteRobotEventMetadata() { func (t *tagEventTestSuite) TestResolveOfDeleteRobotEventMetadata() {
cfg := map[string]any{ cfg := map[string]interface{}{
common.RobotPrefix: "robot$", common.RobotPrefix: "robot$",
} }
config.InitWithSettings(cfg) config.InitWithSettings(cfg)

View File

@ -75,7 +75,7 @@ type controller struct {
// Start starts the manual GC // Start starts the manual GC
func (c *controller) Start(ctx context.Context, policy Policy, trigger string) (int64, error) { func (c *controller) Start(ctx context.Context, policy Policy, trigger string) (int64, error) {
para := make(map[string]any) para := make(map[string]interface{})
para["delete_untagged"] = policy.DeleteUntagged para["delete_untagged"] = policy.DeleteUntagged
para["dry_run"] = policy.DryRun para["dry_run"] = policy.DryRun
para["workers"] = policy.Workers para["workers"] = policy.Workers
@ -129,7 +129,7 @@ func (c *controller) ListExecutions(ctx context.Context, query *q.Query) ([]*Exe
// GetExecution ... // GetExecution ...
func (c *controller) GetExecution(ctx context.Context, id int64) (*Execution, error) { func (c *controller) GetExecution(ctx context.Context, id int64) (*Execution, error) {
execs, err := c.exeMgr.List(ctx, &q.Query{ execs, err := c.exeMgr.List(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"ID": id, "ID": id,
"VendorType": job.GarbageCollectionVendorType, "VendorType": job.GarbageCollectionVendorType,
}, },
@ -147,7 +147,7 @@ func (c *controller) GetExecution(ctx context.Context, id int64) (*Execution, er
// GetTask ... // GetTask ...
func (c *controller) GetTask(ctx context.Context, id int64) (*Task, error) { func (c *controller) GetTask(ctx context.Context, id int64) (*Task, error) {
tasks, err := c.taskMgr.List(ctx, &q.Query{ tasks, err := c.taskMgr.List(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"ID": id, "ID": id,
"VendorType": job.GarbageCollectionVendorType, "VendorType": job.GarbageCollectionVendorType,
}, },
@ -203,7 +203,7 @@ func (c *controller) GetSchedule(ctx context.Context) (*scheduler.Schedule, erro
// CreateSchedule ... // CreateSchedule ...
func (c *controller) CreateSchedule(ctx context.Context, cronType, cron string, policy Policy) (int64, error) { func (c *controller) CreateSchedule(ctx context.Context, cronType, cron string, policy Policy) (int64, error) {
extras := make(map[string]any) extras := make(map[string]interface{})
extras["delete_untagged"] = policy.DeleteUntagged extras["delete_untagged"] = policy.DeleteUntagged
extras["workers"] = policy.Workers extras["workers"] = policy.Workers
return c.schedulerMgr.Schedule(ctx, job.GarbageCollectionVendorType, -1, cronType, cron, job.GarbageCollectionVendorType, policy, extras) return c.schedulerMgr.Schedule(ctx, job.GarbageCollectionVendorType, -1, cronType, cron, job.GarbageCollectionVendorType, policy, extras)

View File

@ -38,7 +38,7 @@ func (g *gcCtrTestSuite) TestStart() {
g.taskMgr.On("Create", mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil) g.taskMgr.On("Create", mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
g.taskMgr.On("Stop", mock.Anything, mock.Anything).Return(nil) g.taskMgr.On("Stop", mock.Anything, mock.Anything).Return(nil)
dataMap := make(map[string]any) dataMap := make(map[string]interface{})
p := Policy{ p := Policy{
DeleteUntagged: true, DeleteUntagged: true,
ExtraAttrs: dataMap, ExtraAttrs: dataMap,
@ -146,7 +146,7 @@ func (g *gcCtrTestSuite) TestCreateSchedule() {
g.scheduler.On("Schedule", mock.Anything, mock.Anything, mock.Anything, mock.Anything, g.scheduler.On("Schedule", mock.Anything, mock.Anything, mock.Anything, mock.Anything,
mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil) mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
dataMap := make(map[string]any) dataMap := make(map[string]interface{})
p := Policy{ p := Policy{
DeleteUntagged: true, DeleteUntagged: true,
ExtraAttrs: dataMap, ExtraAttrs: dataMap,

View File

@ -24,7 +24,7 @@ type Policy struct {
DeleteUntagged bool `json:"deleteuntagged"` DeleteUntagged bool `json:"deleteuntagged"`
DryRun bool `json:"dryrun"` DryRun bool `json:"dryrun"`
Workers int `json:"workers"` Workers int `json:"workers"`
ExtraAttrs map[string]any `json:"extra_attrs"` ExtraAttrs map[string]interface{} `json:"extra_attrs"`
} }
// TriggerType represents the type of trigger. // TriggerType represents the type of trigger.
@ -47,7 +47,7 @@ type Execution struct {
Status string Status string
StatusMessage string StatusMessage string
Trigger string Trigger string
ExtraAttrs map[string]any ExtraAttrs map[string]interface{}
StartTime time.Time StartTime time.Time
UpdateTime time.Time UpdateTime time.Time
} }

View File

@ -48,7 +48,7 @@ func (c *controller) GetHealth(_ context.Context) *OverallHealthStatus {
for name, checker := range registry { for name, checker := range registry {
go check(name, checker, timeout, ch) go check(name, checker, timeout, ch)
} }
for range len(registry) { for i := 0; i < len(registry); i++ {
componentStatus := <-ch componentStatus := <-ch
if len(componentStatus.Error) != 0 { if len(componentStatus.Error) != 0 {
isHealthy = false isHealthy = false

View File

@ -138,7 +138,7 @@ func (c *controller) Get(ctx context.Context, digest string) (*Icon, error) {
} else { } else {
// read icon from blob // read icon from blob
artifacts, err := c.artMgr.List(ctx, &q.Query{ artifacts, err := c.artMgr.List(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"Icon": digest, "Icon": digest,
}, },
}) })

View File

@ -17,7 +17,6 @@ package jobmonitor
import ( import (
"context" "context"
"fmt" "fmt"
"slices"
"strings" "strings"
"time" "time"
@ -279,7 +278,12 @@ func (w *monitorController) ListQueues(ctx context.Context) ([]*jm.Queue, error)
} }
func skippedUnusedJobType(jobType string) bool { func skippedUnusedJobType(jobType string) bool {
return slices.Contains(skippedJobTypes, jobType) for _, t := range skippedJobTypes {
if jobType == t {
return true
}
}
return false
} }
func (w *monitorController) PauseJobQueues(ctx context.Context, jobType string) error { func (w *monitorController) PauseJobQueues(ctx context.Context, jobType string) error {

View File

@ -22,7 +22,7 @@ type Execution struct {
Status string Status string
StatusMessage string StatusMessage string
Trigger string Trigger string
ExtraAttrs map[string]any ExtraAttrs map[string]interface{}
StartTime time.Time StartTime time.Time
EndTime time.Time EndTime time.Time
} }

View File

@ -35,7 +35,7 @@ type SchedulerController interface {
// Get the schedule // Get the schedule
Get(ctx context.Context, vendorType string) (*scheduler.Schedule, error) Get(ctx context.Context, vendorType string) (*scheduler.Schedule, error)
// Create with cron type & string // Create with cron type & string
Create(ctx context.Context, vendorType, cronType, cron, callbackFuncName string, policy any, extrasParam map[string]any) (int64, error) Create(ctx context.Context, vendorType, cronType, cron, callbackFuncName string, policy interface{}, extrasParam map[string]interface{}) (int64, error)
// Delete the schedule // Delete the schedule
Delete(ctx context.Context, vendorType string) error Delete(ctx context.Context, vendorType string) error
// List lists schedules // List lists schedules
@ -76,7 +76,7 @@ func (s *schedulerController) Get(ctx context.Context, vendorType string) (*sche
} }
func (s *schedulerController) Create(ctx context.Context, vendorType, cronType, cron, callbackFuncName string, func (s *schedulerController) Create(ctx context.Context, vendorType, cronType, cron, callbackFuncName string,
policy any, extrasParam map[string]any) (int64, error) { policy interface{}, extrasParam map[string]interface{}) (int64, error) {
return s.schedulerMgr.Schedule(ctx, vendorType, -1, cronType, cron, callbackFuncName, policy, extrasParam) return s.schedulerMgr.Schedule(ctx, vendorType, -1, cronType, cron, callbackFuncName, policy, extrasParam)
} }

View File

@ -49,7 +49,7 @@ func (s *ScheduleTestSuite) TestCreateSchedule() {
s.scheduler.On("Schedule", mock.Anything, mock.Anything, mock.Anything, mock.Anything, s.scheduler.On("Schedule", mock.Anything, mock.Anything, mock.Anything, mock.Anything,
mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil) mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
dataMap := make(map[string]any) dataMap := make(map[string]interface{})
p := purge.JobPolicy{} p := purge.JobPolicy{}
id, err := s.ctl.Create(nil, job.PurgeAuditVendorType, "Daily", "* * * * * *", purge.SchedulerCallback, p, dataMap) id, err := s.ctl.Create(nil, job.PurgeAuditVendorType, "Daily", "* * * * * *", purge.SchedulerCallback, p, dataMap)
s.Nil(err) s.Nil(err)
@ -76,7 +76,7 @@ func (s *ScheduleTestSuite) TestGetSchedule() {
func (s *ScheduleTestSuite) TestListSchedule() { func (s *ScheduleTestSuite) TestListSchedule() {
mock.OnAnything(s.scheduler, "ListSchedules").Return([]*scheduler.Schedule{ mock.OnAnything(s.scheduler, "ListSchedules").Return([]*scheduler.Schedule{
{ID: 1, VendorType: "GARBAGE_COLLECTION", CRON: "0 0 0 * * *", ExtraAttrs: map[string]any{"args": "sample args"}}}, nil).Once() {ID: 1, VendorType: "GARBAGE_COLLECTION", CRON: "0 0 0 * * *", ExtraAttrs: map[string]interface{}{"args": "sample args"}}}, nil).Once()
schedules, err := s.scheduler.ListSchedules(nil, nil) schedules, err := s.scheduler.ListSchedules(nil, nil)
s.Assert().Nil(err) s.Assert().Nil(err)
s.Assert().Equal(1, len(schedules)) s.Assert().Equal(1, len(schedules))

View File

@ -29,7 +29,7 @@ import (
"github.com/goharbor/harbor/src/testing/pkg/ldap" "github.com/goharbor/harbor/src/testing/pkg/ldap"
) )
var defaultConfigWithVerifyCert = map[string]any{ var defaultConfigWithVerifyCert = map[string]interface{}{
common.ExtEndpoint: "https://host01.com", common.ExtEndpoint: "https://host01.com",
common.AUTHMode: common.LDAPAuth, common.AUTHMode: common.LDAPAuth,
common.DatabaseType: "postgresql", common.DatabaseType: "postgresql",

View File

@ -34,17 +34,17 @@ import (
// Controller defines the operation related to project member // Controller defines the operation related to project member
type Controller interface { type Controller interface {
// Get gets the project member with ID // Get gets the project member with ID
Get(ctx context.Context, projectNameOrID any, memberID int) (*models.Member, error) Get(ctx context.Context, projectNameOrID interface{}, memberID int) (*models.Member, error)
// Create add project member to project // Create add project member to project
Create(ctx context.Context, projectNameOrID any, req Request) (int, error) Create(ctx context.Context, projectNameOrID interface{}, req Request) (int, error)
// Delete member from project // Delete member from project
Delete(ctx context.Context, projectNameOrID any, memberID int) error Delete(ctx context.Context, projectNameOrID interface{}, memberID int) error
// List lists all project members with condition // List lists all project members with condition
List(ctx context.Context, projectNameOrID any, entityName string, query *q.Query) ([]*models.Member, error) List(ctx context.Context, projectNameOrID interface{}, entityName string, query *q.Query) ([]*models.Member, error)
// UpdateRole update the project member role // UpdateRole update the project member role
UpdateRole(ctx context.Context, projectNameOrID any, memberID int, role int) error UpdateRole(ctx context.Context, projectNameOrID interface{}, memberID int, role int) error
// Count get the total amount of project members // Count get the total amount of project members
Count(ctx context.Context, projectNameOrID any, query *q.Query) (int, error) Count(ctx context.Context, projectNameOrID interface{}, query *q.Query) (int, error)
// IsProjectAdmin judges if the user is a project admin of any project // IsProjectAdmin judges if the user is a project admin of any project
IsProjectAdmin(ctx context.Context, member commonmodels.User) (bool, error) IsProjectAdmin(ctx context.Context, member commonmodels.User) (bool, error)
} }
@ -89,7 +89,7 @@ func NewController() Controller {
return &controller{mgr: member.Mgr, projectMgr: pkg.ProjectMgr, userManager: user.New(), groupManager: usergroup.Mgr} return &controller{mgr: member.Mgr, projectMgr: pkg.ProjectMgr, userManager: user.New(), groupManager: usergroup.Mgr}
} }
func (c *controller) Count(ctx context.Context, projectNameOrID any, query *q.Query) (int, error) { func (c *controller) Count(ctx context.Context, projectNameOrID interface{}, query *q.Query) (int, error) {
p, err := c.projectMgr.Get(ctx, projectNameOrID) p, err := c.projectMgr.Get(ctx, projectNameOrID)
if err != nil { if err != nil {
return 0, err return 0, err
@ -97,7 +97,7 @@ func (c *controller) Count(ctx context.Context, projectNameOrID any, query *q.Qu
return c.mgr.GetTotalOfProjectMembers(ctx, p.ProjectID, query) return c.mgr.GetTotalOfProjectMembers(ctx, p.ProjectID, query)
} }
func (c *controller) UpdateRole(ctx context.Context, projectNameOrID any, memberID int, role int) error { func (c *controller) UpdateRole(ctx context.Context, projectNameOrID interface{}, memberID int, role int) error {
p, err := c.projectMgr.Get(ctx, projectNameOrID) p, err := c.projectMgr.Get(ctx, projectNameOrID)
if err != nil { if err != nil {
return err return err
@ -108,7 +108,7 @@ func (c *controller) UpdateRole(ctx context.Context, projectNameOrID any, member
return c.mgr.UpdateRole(ctx, p.ProjectID, memberID, role) return c.mgr.UpdateRole(ctx, p.ProjectID, memberID, role)
} }
func (c *controller) Get(ctx context.Context, projectNameOrID any, memberID int) (*models.Member, error) { func (c *controller) Get(ctx context.Context, projectNameOrID interface{}, memberID int) (*models.Member, error) {
p, err := c.projectMgr.Get(ctx, projectNameOrID) p, err := c.projectMgr.Get(ctx, projectNameOrID)
if err != nil { if err != nil {
return nil, err return nil, err
@ -119,7 +119,7 @@ func (c *controller) Get(ctx context.Context, projectNameOrID any, memberID int)
return c.mgr.Get(ctx, p.ProjectID, memberID) return c.mgr.Get(ctx, p.ProjectID, memberID)
} }
func (c *controller) Create(ctx context.Context, projectNameOrID any, req Request) (int, error) { func (c *controller) Create(ctx context.Context, projectNameOrID interface{}, req Request) (int, error) {
p, err := c.projectMgr.Get(ctx, projectNameOrID) p, err := c.projectMgr.Get(ctx, projectNameOrID)
if err != nil { if err != nil {
return 0, err return 0, err
@ -239,7 +239,7 @@ func isValidRole(role int) bool {
} }
} }
func (c *controller) List(ctx context.Context, projectNameOrID any, entityName string, query *q.Query) ([]*models.Member, error) { func (c *controller) List(ctx context.Context, projectNameOrID interface{}, entityName string, query *q.Query) ([]*models.Member, error) {
p, err := c.projectMgr.Get(ctx, projectNameOrID) p, err := c.projectMgr.Get(ctx, projectNameOrID)
if err != nil { if err != nil {
return nil, err return nil, err
@ -254,7 +254,7 @@ func (c *controller) List(ctx context.Context, projectNameOrID any, entityName s
return c.mgr.List(ctx, pm, query) return c.mgr.List(ctx, pm, query)
} }
func (c *controller) Delete(ctx context.Context, projectNameOrID any, memberID int) error { func (c *controller) Delete(ctx context.Context, projectNameOrID interface{}, memberID int) error {
p, err := c.projectMgr.Get(ctx, projectNameOrID) p, err := c.projectMgr.Get(ctx, projectNameOrID)
if err != nil { if err != nil {
return err return err

View File

@ -180,7 +180,7 @@ func (c *controller) CreateInstance(ctx context.Context, instance *providerModel
// Avoid duplicated endpoint // Avoid duplicated endpoint
var query = &q.Query{ var query = &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"endpoint": instance.Endpoint, "endpoint": instance.Endpoint,
}, },
} }
@ -208,7 +208,7 @@ func (c *controller) DeleteInstance(ctx context.Context, id int64) error {
} }
// delete instance should check the instance whether be used by policies // delete instance should check the instance whether be used by policies
policies, err := c.ListPolicies(ctx, &q.Query{ policies, err := c.ListPolicies(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"provider_id": id, "provider_id": id,
}, },
}) })
@ -235,7 +235,7 @@ func (c *controller) UpdateInstance(ctx context.Context, instance *providerModel
if !instance.Enabled { if !instance.Enabled {
// update instance should check the instance whether be used by policies // update instance should check the instance whether be used by policies
policies, err := c.ListPolicies(ctx, &q.Query{ policies, err := c.ListPolicies(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"provider_id": instance.ID, "provider_id": instance.ID,
}, },
}) })
@ -311,7 +311,7 @@ func (c *controller) CreatePolicy(ctx context.Context, schema *policyModels.Sche
schema.Trigger.Type == policyModels.TriggerTypeScheduled && schema.Trigger.Type == policyModels.TriggerTypeScheduled &&
len(schema.Trigger.Settings.Cron) > 0 { len(schema.Trigger.Settings.Cron) > 0 {
// schedule and update policy // schedule and update policy
extras := make(map[string]any) extras := make(map[string]interface{})
if _, err = c.scheduler.Schedule(ctx, job.P2PPreheatVendorType, id, "", schema.Trigger.Settings.Cron, if _, err = c.scheduler.Schedule(ctx, job.P2PPreheatVendorType, id, "", schema.Trigger.Settings.Cron,
SchedulerCallback, TriggerParam{PolicyID: id}, extras); err != nil { SchedulerCallback, TriggerParam{PolicyID: id}, extras); err != nil {
return 0, err return 0, err
@ -409,7 +409,7 @@ func (c *controller) UpdatePolicy(ctx context.Context, schema *policyModels.Sche
// schedule new // schedule new
if needSch { if needSch {
extras := make(map[string]any) extras := make(map[string]interface{})
if _, err := c.scheduler.Schedule(ctx, job.P2PPreheatVendorType, schema.ID, "", cron, SchedulerCallback, if _, err := c.scheduler.Schedule(ctx, job.P2PPreheatVendorType, schema.ID, "", cron, SchedulerCallback,
TriggerParam{PolicyID: schema.ID}, extras); err != nil { TriggerParam{PolicyID: schema.ID}, extras); err != nil {
return err return err
@ -465,7 +465,7 @@ func (c *controller) DeletePoliciesOfProject(ctx context.Context, project int64)
// deleteExecs delete executions // deleteExecs delete executions
func (c *controller) deleteExecs(ctx context.Context, vendorID int64) error { func (c *controller) deleteExecs(ctx context.Context, vendorID int64) error {
executions, err := c.executionMgr.List(ctx, &q.Query{ executions, err := c.executionMgr.List(ctx, &q.Query{
Keywords: map[string]any{ Keywords: map[string]interface{}{
"VendorType": job.P2PPreheatVendorType, "VendorType": job.P2PPreheatVendorType,
"VendorID": vendorID, "VendorID": vendorID,
}, },

View File

@ -82,7 +82,7 @@ func (s *preheatSuite) SetupSuite() {
}, },
}, nil) }, nil)
s.fakeInstanceMgr.On("Save", mock.Anything, mock.Anything).Return(int64(1), nil) s.fakeInstanceMgr.On("Save", mock.Anything, mock.Anything).Return(int64(1), nil)
s.fakeInstanceMgr.On("Count", mock.Anything, &q.Query{Keywords: map[string]any{ s.fakeInstanceMgr.On("Count", mock.Anything, &q.Query{Keywords: map[string]interface{}{
"endpoint": "http://localhost", "endpoint": "http://localhost",
}}).Return(int64(1), nil) }}).Return(int64(1), nil)
s.fakeInstanceMgr.On("Count", mock.Anything, mock.Anything).Return(int64(0), nil) s.fakeInstanceMgr.On("Count", mock.Anything, mock.Anything).Return(int64(0), nil)
@ -117,7 +117,7 @@ func (s *preheatSuite) TearDownSuite() {
func (s *preheatSuite) TestGetAvailableProviders() { func (s *preheatSuite) TestGetAvailableProviders() {
providers, err := s.controller.GetAvailableProviders() providers, err := s.controller.GetAvailableProviders()
s.Equal(2, len(providers)) s.Equal(2, len(providers))
expectProviders := map[string]any{} expectProviders := map[string]interface{}{}
expectProviders["dragonfly"] = nil expectProviders["dragonfly"] = nil
expectProviders["kraken"] = nil expectProviders["kraken"] = nil
_, ok := expectProviders[providers[0].ID] _, ok := expectProviders[providers[0].ID]
@ -177,7 +177,7 @@ func (s *preheatSuite) TestCreateInstance() {
func (s *preheatSuite) TestDeleteInstance() { func (s *preheatSuite) TestDeleteInstance() {
// instance be used should not be deleted // instance be used should not be deleted
s.fakeInstanceMgr.On("Get", s.ctx, int64(1)).Return(&providerModel.Instance{ID: 1}, nil) s.fakeInstanceMgr.On("Get", s.ctx, int64(1)).Return(&providerModel.Instance{ID: 1}, nil)
s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]any{"provider_id": int64(1)}}).Return([]*policy.Schema{ s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]interface{}{"provider_id": int64(1)}}).Return([]*policy.Schema{
{ {
ProviderID: 1, ProviderID: 1,
}, },
@ -186,7 +186,7 @@ func (s *preheatSuite) TestDeleteInstance() {
s.Error(err, "instance should not be deleted") s.Error(err, "instance should not be deleted")
s.fakeInstanceMgr.On("Get", s.ctx, int64(2)).Return(&providerModel.Instance{ID: 2}, nil) s.fakeInstanceMgr.On("Get", s.ctx, int64(2)).Return(&providerModel.Instance{ID: 2}, nil)
s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]any{"provider_id": int64(2)}}).Return([]*policy.Schema{}, nil) s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]interface{}{"provider_id": int64(2)}}).Return([]*policy.Schema{}, nil)
s.fakeInstanceMgr.On("Delete", s.ctx, int64(2)).Return(nil) s.fakeInstanceMgr.On("Delete", s.ctx, int64(2)).Return(nil)
err = s.controller.DeleteInstance(s.ctx, int64(2)) err = s.controller.DeleteInstance(s.ctx, int64(2))
s.NoError(err, "instance can be deleted") s.NoError(err, "instance can be deleted")
@ -202,7 +202,7 @@ func (s *preheatSuite) TestUpdateInstance() {
// disable instance should error due to with policy used // disable instance should error due to with policy used
s.fakeInstanceMgr.On("Get", s.ctx, int64(1001)).Return(&providerModel.Instance{ID: 1001}, nil) s.fakeInstanceMgr.On("Get", s.ctx, int64(1001)).Return(&providerModel.Instance{ID: 1001}, nil)
s.fakeInstanceMgr.On("Update", s.ctx, &providerModel.Instance{ID: 1001}).Return(nil) s.fakeInstanceMgr.On("Update", s.ctx, &providerModel.Instance{ID: 1001}).Return(nil)
s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]any{"provider_id": int64(1001)}}).Return([]*policy.Schema{ s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]interface{}{"provider_id": int64(1001)}}).Return([]*policy.Schema{
{ProviderID: 1001}, {ProviderID: 1001},
}, nil) }, nil)
err = s.controller.UpdateInstance(s.ctx, &providerModel.Instance{ID: 1001}) err = s.controller.UpdateInstance(s.ctx, &providerModel.Instance{ID: 1001})
@ -211,14 +211,14 @@ func (s *preheatSuite) TestUpdateInstance() {
// disable instance can be deleted if no policy used // disable instance can be deleted if no policy used
s.fakeInstanceMgr.On("Get", s.ctx, int64(1002)).Return(&providerModel.Instance{ID: 1002}, nil) s.fakeInstanceMgr.On("Get", s.ctx, int64(1002)).Return(&providerModel.Instance{ID: 1002}, nil)
s.fakeInstanceMgr.On("Update", s.ctx, &providerModel.Instance{ID: 1002}).Return(nil) s.fakeInstanceMgr.On("Update", s.ctx, &providerModel.Instance{ID: 1002}).Return(nil)
s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]any{"provider_id": int64(1002)}}).Return([]*policy.Schema{}, nil) s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]interface{}{"provider_id": int64(1002)}}).Return([]*policy.Schema{}, nil)
err = s.controller.UpdateInstance(s.ctx, &providerModel.Instance{ID: 1002}) err = s.controller.UpdateInstance(s.ctx, &providerModel.Instance{ID: 1002})
s.NoError(err, "instance can be disabled") s.NoError(err, "instance can be disabled")
// not support change vendor type // not support change vendor type
s.fakeInstanceMgr.On("Get", s.ctx, int64(1003)).Return(&providerModel.Instance{ID: 1003, Vendor: "dragonfly"}, nil) s.fakeInstanceMgr.On("Get", s.ctx, int64(1003)).Return(&providerModel.Instance{ID: 1003, Vendor: "dragonfly"}, nil)
s.fakeInstanceMgr.On("Update", s.ctx, &providerModel.Instance{ID: 1003, Vendor: "kraken"}).Return(nil) s.fakeInstanceMgr.On("Update", s.ctx, &providerModel.Instance{ID: 1003, Vendor: "kraken"}).Return(nil)
s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]any{"provider_id": int64(1003)}}).Return([]*policy.Schema{}, nil) s.fakePolicyMgr.On("ListPolicies", s.ctx, &q.Query{Keywords: map[string]interface{}{"provider_id": int64(1003)}}).Return([]*policy.Schema{}, nil)
err = s.controller.UpdateInstance(s.ctx, &providerModel.Instance{ID: 1003, Vendor: "kraken"}) err = s.controller.UpdateInstance(s.ctx, &providerModel.Instance{ID: 1003, Vendor: "kraken"})
s.Error(err, "provider vendor cannot be changed") s.Error(err, "provider vendor cannot be changed")
} }
@ -347,7 +347,7 @@ func (s *preheatSuite) TestDeletePoliciesOfProject() {
for _, p := range fakePolicies { for _, p := range fakePolicies {
s.fakePolicyMgr.On("Get", s.ctx, p.ID).Return(p, nil) s.fakePolicyMgr.On("Get", s.ctx, p.ID).Return(p, nil)
s.fakePolicyMgr.On("Delete", s.ctx, p.ID).Return(nil) s.fakePolicyMgr.On("Delete", s.ctx, p.ID).Return(nil)
s.fakeExecutionMgr.On("List", s.ctx, &q.Query{Keywords: map[string]any{"VendorID": p.ID, "VendorType": "P2P_PREHEAT"}}).Return([]*taskModel.Execution{}, nil) s.fakeExecutionMgr.On("List", s.ctx, &q.Query{Keywords: map[string]interface{}{"VendorID": p.ID, "VendorType": "P2P_PREHEAT"}}).Return([]*taskModel.Execution{}, nil)
} }
err := s.controller.DeletePoliciesOfProject(s.ctx, 10) err := s.controller.DeletePoliciesOfProject(s.ctx, 10)

Some files were not shown because too many files have changed in this diff Show More