Mirror of https://github.com/goharbor/harbor (synced 2025-05-21 12:05:34 +00:00)

Commit ce6bf73884: Merge remote-tracking branch 'upstream/main'
@@ -8,5 +8,8 @@
* Add date here... Add signature here...
- Add your reason here...

* Sept 23 2021 <jiaoya@vmware.com>
- Refresh base image

* Jul 15 2021 <danfengl@vmware.com>
- Create this file to trigger build base action in build-package workflow
.github/workflows/CI.yml (vendored) | 20
@@ -29,10 +29,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.16
- name: Set up Go 1.17
uses: actions/setup-go@v1
with:
go-version: 1.16.5
go-version: 1.17.2
id: go
- name: setup Docker
uses: docker-practice/actions-setup-docker@0.0.1
@@ -95,10 +95,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.16
- name: Set up Go 1.17
uses: actions/setup-go@v1
with:
go-version: 1.16.5
go-version: 1.17.2
id: go
- name: setup Docker
uses: docker-practice/actions-setup-docker@0.0.1
@@ -155,10 +155,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.16
- name: Set up Go 1.17
uses: actions/setup-go@v1
with:
go-version: 1.16.5
go-version: 1.17.2
id: go
- name: setup Docker
uses: docker-practice/actions-setup-docker@0.0.1
@@ -215,10 +215,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.16
- name: Set up Go 1.17
uses: actions/setup-go@v1
with:
go-version: 1.16.5
go-version: 1.17.2
id: go
- name: setup Docker
uses: docker-practice/actions-setup-docker@0.0.1
@@ -273,10 +273,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.16
- name: Set up Go 1.17
uses: actions/setup-go@v1
with:
go-version: 1.16.5
go-version: 1.17.2
id: go
- name: setup Docker
uses: docker-practice/actions-setup-docker@0.0.1
.github/workflows/build-package.yml (vendored) | 4
@@ -26,10 +26,10 @@ jobs:
service_account_key: ${{ secrets.GCP_SA_KEY }}
export_default_credentials: true
- run: gcloud info
- name: Set up Go 1.16
- name: Set up Go 1.17
uses: actions/setup-go@v1
with:
go-version: 1.16.5
go-version: 1.17.2
id: go
- name: setup Docker
uses: docker-practice/actions-setup-docker@0.0.1
.github/workflows/conformance_test.yml (vendored) | 4
@@ -26,10 +26,10 @@ jobs:
service_account_key: ${{ secrets.GCP_SA_KEY }}
export_default_credentials: true
- run: gcloud info
- name: Set up Go 1.16
- name: Set up Go 1.17
uses: actions/setup-go@v1
with:
go-version: 1.16.5
go-version: 1.17.2
id: go
- name: setup Docker
uses: docker-practice/actions-setup-docker@0.0.1
@@ -161,7 +161,8 @@ Harbor backend is written in [Go](http://golang.org/). If you don't have a Harbo
| 2.1 | 1.14.13 |
| 2.2 | 1.15.6 |
| 2.3 | 1.15.12 |
| 2.4 | 1.16.5 |
| 2.4 | 1.16.7 |
| 2.5 | 1.17.2 |

Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.
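For reference, a minimal sketch of the Go environment setup mentioned above; the paths below are illustrative assumptions, not values taken from this change:

# Illustrative only: point GOPATH at your workspace and put the Go toolchain and GOPATH/bin on PATH.
export GOPATH="$HOME/go"
export PATH="$PATH:/usr/local/go/bin:$GOPATH/bin"
# Verify the toolchain matches the version listed for your Harbor release (e.g. 1.17.2 for Harbor 2.5).
go version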
Makefile | 10
@@ -9,7 +9,7 @@
# compile_golangimage:
# compile from golang image
# for example: make compile_golangimage -e GOBUILDIMAGE= \
# golang:1.16.5
# golang:1.17.2
# compile_core, compile_jobservice: compile specific binary
#
# build: build Harbor docker images from photon baseimage
@@ -110,8 +110,8 @@ PREPARE_VERSION_NAME=versions
REGISTRYVERSION=v2.7.1-patch-2819-2553-redis
NOTARYVERSION=v0.6.1
NOTARYMIGRATEVERSION=v4.11.0
TRIVYVERSION=v0.17.2
TRIVYADAPTERVERSION=v0.19.0
TRIVYVERSION=v0.20.1
TRIVYADAPTERVERSION=v0.24.0

# version of chartmuseum for pulling the source code
CHARTMUSEUM_SRC_TAG=v0.13.1
@@ -156,7 +156,7 @@ GOINSTALL=$(GOCMD) install
GOTEST=$(GOCMD) test
GODEP=$(GOTEST) -i
GOFMT=gofmt -w
GOBUILDIMAGE=golang:1.16.5
GOBUILDIMAGE=golang:1.17.2
GOBUILDPATHINCONTAINER=/harbor

# go build
@@ -446,7 +446,7 @@ build_base_docker:
if [ -n "$(REGISTRYUSER)" ] && [ -n "$(REGISTRYPASSWORD)" ] ; then \
docker login -u $(REGISTRYUSER) -p $(REGISTRYPASSWORD) ; \
else \
echo "No docker credentials provided, please make sure enough priviledges to access docker hub!" ; \
echo "No docker credentials provided, please make sure enough privileges to access docker hub!" ; \
fi
@for name in $(BUILDBASETARGET); do \
echo $$name ; \
@@ -1166,6 +1166,31 @@ paths:
$ref: '#/responses/404'
'500':
$ref: '#/responses/500'
/projects/{project_name}/repositories/{repository_name}/artifacts/{reference}/scan/stop:
post:
summary: Cancelling a scan job for a particular artifact
description: Cancelling a scan job for a particular artifact
tags:
- scan
operationId: stopScanArtifact
parameters:
- $ref: '#/parameters/requestId'
- $ref: '#/parameters/projectName'
- $ref: '#/parameters/repositoryName'
- $ref: '#/parameters/reference'
responses:
'202':
$ref: '#/responses/202'
'400':
$ref: '#/responses/400'
'401':
$ref: '#/responses/401'
'403':
$ref: '#/responses/403'
'404':
$ref: '#/responses/404'
'500':
$ref: '#/responses/500'
/projects/{project_name}/repositories/{repository_name}/artifacts/{reference}/scan/{report_id}/log:
get:
summary: Get the log of the scan report
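A hedged usage sketch for the new stopScanArtifact operation added above; the host, project, repository, artifact reference, and credentials are illustrative assumptions, and the /api/v2.0 base path is assumed rather than shown in this diff:

# Request cancellation of the scan job for one artifact; a 202 response means the request was accepted.
curl -X POST -u "admin:Harbor12345" \
  "https://harbor.example.com/api/v2.0/projects/library/repositories/nginx/artifacts/sha256:abc123/scan/stop"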
@ -2697,6 +2722,8 @@ paths:
|
||||
- usergroup
|
||||
parameters:
|
||||
- $ref: '#/parameters/requestId'
|
||||
- $ref: '#/parameters/page'
|
||||
- $ref: '#/parameters/pageSize'
|
||||
- name: ldap_group_dn
|
||||
in: query
|
||||
type: string
|
||||
@ -2709,6 +2736,13 @@ paths:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/UserGroup'
|
||||
headers:
|
||||
X-Total-Count:
|
||||
description: The total count of available items
|
||||
type: integer
|
||||
Link:
|
||||
description: Link to previous page and next page
|
||||
type: string
|
||||
'401':
|
||||
$ref: '#/responses/401'
|
||||
'403':
|
||||
@@ -2744,6 +2778,41 @@ paths:
$ref: '#/responses/409'
'500':
$ref: '#/responses/500'
/usergroups/search:
get:
summary: Search groups by groupname
description: |
This endpoint is to search groups by group name. It's open for all authenticated requests.
tags:
- usergroup
operationId: searchUserGroups
parameters:
- $ref: '#/parameters/requestId'
- $ref: '#/parameters/page'
- $ref: '#/parameters/pageSize'
- name: groupname
in: query
type: string
required: true
description: Group name for filtering results.
responses:
'200':
description: Search groups successfully.
schema:
type: array
items:
$ref: '#/definitions/UserGroupSearchItem'
headers:
X-Total-Count:
description: The total count of available items
type: integer
Link:
description: Link to previous page and next page
type: string
'401':
$ref: '#/responses/401'
'500':
$ref: '#/responses/500'
'/usergroups/{group_id}':
get:
summary: Get user group information
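A hedged sketch of calling the new searchUserGroups operation; the host, credentials, and group name are illustrative assumptions:

# Search user groups by name; the response is an array of UserGroupSearchItem objects (id, group_name, group_type).
curl -G -u "admin:Harbor12345" \
  --data-urlencode "groupname=dev" \
  "https://harbor.example.com/api/v2.0/usergroups/search"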
@@ -4167,6 +4236,26 @@ paths:
$ref: '#/responses/412'
'500':
$ref: '#/responses/500'
/system/scanAll/stop:
post:
summary: Stop scanAll job execution
description: Stop scanAll job execution
parameters:
- $ref: '#/parameters/requestId'
tags:
- scanAll
operationId: stopScanAll
responses:
'202':
$ref: '#/responses/202'
'400':
$ref: '#/responses/400'
'401':
$ref: '#/responses/401'
'403':
$ref: '#/responses/403'
'500':
$ref: '#/responses/500'
/ping:
get:
operationId: getPing
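A hedged sketch of the new stopScanAll operation; host and credentials are illustrative assumptions:

# Stop the currently running scan-all execution; expect 202 Accepted.
curl -X POST -u "admin:Harbor12345" \
  "https://harbor.example.com/api/v2.0/system/scanAll/stop"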
@ -6454,6 +6543,11 @@ definitions:
|
||||
type: string
|
||||
format: date-time
|
||||
description: The update time of the policy.
|
||||
speed:
|
||||
type: integer
|
||||
format: int32
|
||||
description: speed limit for each task
|
||||
x-isnullable: true # make this field optional to keep backward compatibility
|
||||
ReplicationTrigger:
|
||||
type: object
|
||||
properties:
|
||||
@ -6679,10 +6773,6 @@ definitions:
|
||||
additionalProperties:
|
||||
type: integer
|
||||
format: int64
|
||||
x-go-type:
|
||||
type: ResourceList
|
||||
import:
|
||||
package: "github.com/goharbor/harbor/src/pkg/quota/types"
|
||||
ReplicationExecution:
|
||||
type: object
|
||||
description: The replication execution
|
||||
@ -7689,6 +7779,18 @@ definitions:
|
||||
ldap_group_dn:
|
||||
type: string
|
||||
description: The DN of the LDAP group if group type is 1 (LDAP group).
|
||||
UserGroupSearchItem:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: integer
|
||||
description: The ID of the user group
|
||||
group_name:
|
||||
type: string
|
||||
description: The name of the user group
|
||||
group_type:
|
||||
type: integer
|
||||
description: 'The group type, 1 for LDAP group, 2 for HTTP group.'
|
||||
SupportedWebhookEventTypes:
|
||||
type: object
|
||||
description: Supportted webhook event types and notify types.
|
||||
@ -7821,10 +7923,18 @@ definitions:
|
||||
format: date-time
|
||||
InternalConfigurationsResponse:
|
||||
type: object
|
||||
x-go-type:
|
||||
type: InternalCfg
|
||||
import:
|
||||
package: "github.com/goharbor/harbor/src/lib/config"
|
||||
additionalProperties:
|
||||
$ref: '#/definitions/InternalConfigurationValue'
|
||||
InternalConfigurationValue:
|
||||
type: object
|
||||
properties:
|
||||
value:
|
||||
type: object
|
||||
description: The value of current config item
|
||||
editable:
|
||||
type: boolean
|
||||
x-omitempty: false
|
||||
description: The configure item can be updated or not
|
||||
ConfigurationsResponse:
|
||||
type: object
|
||||
properties:
|
||||
@ -7999,280 +8109,280 @@ definitions:
|
||||
description: 'The parameters of the policy, the values are dependent on the type of the policy.'
|
||||
Configurations:
|
||||
type: object
|
||||
properties:
|
||||
properties:
|
||||
auth_mode:
|
||||
type: string
|
||||
description: The auth mode of current system, such as "db_auth", "ldap_auth", "oidc_auth"
|
||||
description: The auth mode of current system, such as "db_auth", "ldap_auth", "oidc_auth"
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
email_from:
|
||||
type: string
|
||||
description: The sender name for Email notification.
|
||||
description: The sender name for Email notification.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
email_host:
|
||||
type: string
|
||||
description: The hostname of SMTP server that sends Email notification.
|
||||
description: The hostname of SMTP server that sends Email notification.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
email_identity:
|
||||
type: string
|
||||
description: By default it's empty so the email_username is picked
|
||||
description: By default it's empty so the email_username is picked
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
email_insecure:
|
||||
type: boolean
|
||||
description: Whether or not the certificate will be verified when Harbor tries to access the email server.
|
||||
description: Whether or not the certificate will be verified when Harbor tries to access the email server.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
email_password:
|
||||
type: string
|
||||
description: Email password
|
||||
description: Email password
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
email_port:
|
||||
type: integer
|
||||
description: The port of SMTP server
|
||||
description: The port of SMTP server
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
email_ssl:
|
||||
type: boolean
|
||||
description: When it''s set to true the system will access Email server via TLS by default. If it''s set to false, it still will handle "STARTTLS" from server side.
|
||||
description: When it''s set to true the system will access Email server via TLS by default. If it''s set to false, it still will handle "STARTTLS" from server side.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
email_username:
|
||||
type: string
|
||||
description: The username for authenticate against SMTP server
|
||||
description: The username for authenticate against SMTP server
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_base_dn:
|
||||
type: string
|
||||
description: The Base DN for LDAP binding.
|
||||
description: The Base DN for LDAP binding.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_filter:
|
||||
type: string
|
||||
description: The filter for LDAP search
|
||||
description: The filter for LDAP search
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_group_base_dn:
|
||||
type: string
|
||||
description: The base DN to search LDAP group.
|
||||
description: The base DN to search LDAP group.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_group_admin_dn:
|
||||
type: string
|
||||
description: Specify the ldap group which have the same privilege with Harbor admin
|
||||
description: Specify the ldap group which have the same privilege with Harbor admin
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_group_attribute_name:
|
||||
type: string
|
||||
description: The attribute which is used as identity of the LDAP group, default is cn.'
|
||||
description: The attribute which is used as identity of the LDAP group, default is cn.'
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_group_search_filter:
|
||||
type: string
|
||||
description: The filter to search the ldap group
|
||||
description: The filter to search the ldap group
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_group_search_scope:
|
||||
type: integer
|
||||
description: The scope to search ldap group. ''0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE''
|
||||
description: The scope to search ldap group. ''0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE''
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_scope:
|
||||
type: integer
|
||||
description: The scope to search ldap users,'0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE'
|
||||
description: The scope to search ldap users,'0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE'
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_search_dn:
|
||||
type: string
|
||||
description: The DN of the user to do the search.
|
||||
description: The DN of the user to do the search.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_search_password:
|
||||
type: string
|
||||
description: The password of the ldap search dn
|
||||
description: The password of the ldap search dn
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_timeout:
|
||||
type: integer
|
||||
description: Timeout in seconds for connection to LDAP server
|
||||
description: Timeout in seconds for connection to LDAP server
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_uid:
|
||||
type: string
|
||||
description: The attribute which is used as identity for the LDAP binding, such as "CN" or "SAMAccountname"
|
||||
description: The attribute which is used as identity for the LDAP binding, such as "CN" or "SAMAccountname"
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_url:
|
||||
type: string
|
||||
description: The URL of LDAP server
|
||||
description: The URL of LDAP server
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_verify_cert:
|
||||
type: boolean
|
||||
description: Whether verify your OIDC server certificate, disable it if your OIDC server is hosted via self-hosted certificate.
|
||||
description: Whether verify your OIDC server certificate, disable it if your OIDC server is hosted via self-hosted certificate.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_group_membership_attribute:
|
||||
type: string
|
||||
description: The user attribute to identify the group membership
|
||||
description: The user attribute to identify the group membership
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
project_creation_restriction:
|
||||
type: string
|
||||
description: Indicate who can create projects, it could be ''adminonly'' or ''everyone''.
|
||||
description: Indicate who can create projects, it could be ''adminonly'' or ''everyone''.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
read_only:
|
||||
type: boolean
|
||||
description: The flag to indicate whether Harbor is in readonly mode.
|
||||
description: The flag to indicate whether Harbor is in readonly mode.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
self_registration:
|
||||
type: boolean
|
||||
description: Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.
|
||||
description: Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
token_expiration:
|
||||
type: integer
|
||||
description: The expiration time of the token for internal Registry, in minutes.
|
||||
description: The expiration time of the token for internal Registry, in minutes.
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
uaa_client_id:
|
||||
type: string
|
||||
description: The client id of UAA
|
||||
description: The client id of UAA
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
uaa_client_secret:
|
||||
type: string
|
||||
description: The client secret of the UAA
|
||||
description: The client secret of the UAA
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
uaa_endpoint:
|
||||
type: string
|
||||
description: The endpoint of the UAA
|
||||
description: The endpoint of the UAA
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
uaa_verify_cert:
|
||||
type: boolean
|
||||
description: Verify the certificate in UAA server
|
||||
description: Verify the certificate in UAA server
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
http_authproxy_endpoint:
|
||||
type: string
|
||||
description: The endpoint of the HTTP auth
|
||||
description: The endpoint of the HTTP auth
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
http_authproxy_tokenreview_endpoint:
|
||||
type: string
|
||||
description: The token review endpoint
|
||||
description: The token review endpoint
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
http_authproxy_admin_groups:
|
||||
type: string
|
||||
description: The group which has the harbor admin privileges
|
||||
description: The group which has the harbor admin privileges
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
http_authproxy_admin_usernames:
|
||||
type: string
|
||||
description: The username which has the harbor admin privileges
|
||||
description: The username which has the harbor admin privileges
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
http_authproxy_verify_cert:
|
||||
type: boolean
|
||||
description: Verify the HTTP auth provider's certificate
|
||||
description: Verify the HTTP auth provider's certificate
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
http_authproxy_skip_search:
|
||||
type: boolean
|
||||
description: Search user before onboard
|
||||
description: Search user before onboard
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
http_authproxy_server_certificate:
|
||||
type: string
|
||||
description: The certificate of the HTTP auth provider
|
||||
description: The certificate of the HTTP auth provider
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_name:
|
||||
type: string
|
||||
description: The OIDC provider name
|
||||
description: The OIDC provider name
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_endpoint:
|
||||
type: string
|
||||
description: The endpoint of the OIDC provider
|
||||
description: The endpoint of the OIDC provider
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_client_id:
|
||||
type: string
|
||||
description: The client ID of the OIDC provider
|
||||
description: The client ID of the OIDC provider
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_client_secret:
|
||||
type: string
|
||||
description: The OIDC provider secret
|
||||
description: The OIDC provider secret
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_groups_claim:
|
||||
type: string
|
||||
description: The attribute claims the group name
|
||||
description: The attribute claims the group name
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_admin_group:
|
||||
type: string
|
||||
description: The OIDC group which has the harbor admin privileges
|
||||
description: The OIDC group which has the harbor admin privileges
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_scope:
|
||||
type: string
|
||||
description: The scope of the OIDC provider
|
||||
description: The scope of the OIDC provider
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_user_claim:
|
||||
type: string
|
||||
description: The attribute claims the username
|
||||
description: The attribute claims the username
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_verify_cert:
|
||||
type: boolean
|
||||
description: Verify the OIDC provider's certificate'
|
||||
description: Verify the OIDC provider's certificate'
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_auto_onboard:
|
||||
type: boolean
|
||||
description: Auto onboard the OIDC user
|
||||
description: Auto onboard the OIDC user
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
oidc_extra_redirect_parms:
|
||||
type: string
|
||||
description: Extra parameters to add when redirect request to OIDC provider
|
||||
description: Extra parameters to add when redirect request to OIDC provider
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
robot_token_duration:
|
||||
type: integer
|
||||
description: The robot account token duration in days
|
||||
description: The robot account token duration in days
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
robot_name_prefix:
|
||||
type: string
|
||||
description: The rebot account name prefix
|
||||
description: The rebot account name prefix
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
notification_enable:
|
||||
type: boolean
|
||||
description: Enable notification
|
||||
description: Enable notification
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
quota_per_project_enable:
|
||||
type: boolean
|
||||
description: Enable quota per project
|
||||
description: Enable quota per project
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
storage_per_project:
|
||||
type: integer
|
||||
description: The storage quota per project
|
||||
description: The storage quota per project
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
StringConfigItem:
|
||||
|
@@ -129,7 +129,7 @@ log:
# port: 5140

#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.3.0
_version: 2.4.0

# Uncomment external_database if using external database.
# external_database:
@@ -199,3 +199,32 @@ proxy:
# enabled: false
# port: 9090
# path: /metrics

# Trace related config
# only can enable one trace provider(jaeger or otel) at the same time,
# and when using jaeger as provider, can only enable it with agent mode or collector mode.
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
# if using jaeger agetn mode uncomment agent_host and agent_port
# trace:
# enabled: true
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
# sample_rate: 1
# # # namespace used to differenciate different harbor services
# # namespace:
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
# # attributes:
# # application: harbor
# # # jaeger should be 1.26 or newer.
# # jaeger:
# # endpoint: http://hostname:14268/api/traces
# # username:
# # password:
# # agent_host: hostname
# # # export trace data by jaeger.thrift in compact mode
# # agent_port: 6831
# # otel:
# # endpoint: hostname:4318
# # url_path: /v1/traces
# # compression: false
# # insecure: true
# # timeout: 10s
make/migrations/postgresql/0061_2.3.4_schema.up.sql (new file) | 1
@@ -0,0 +1 @@
UPDATE harbor_user SET email=NULL WHERE email=''

@@ -1,2 +1,11 @@
/* cleanup deleted user project members */
DELETE FROM project_member pm WHERE pm.entity_type = 'u' AND EXISTS (SELECT NULL FROM harbor_user u WHERE pm.entity_id = u.user_id AND u.deleted = true )
DELETE FROM project_member pm WHERE pm.entity_type = 'u' AND EXISTS (SELECT NULL FROM harbor_user u WHERE pm.entity_id = u.user_id AND u.deleted = true );

ALTER TABLE replication_policy ADD COLUMN IF NOT EXISTS speed_kb int;

/* add version fields for lock free quota */
ALTER TABLE quota ADD COLUMN IF NOT EXISTS version bigint DEFAULT 0;
ALTER TABLE quota_usage ADD COLUMN IF NOT EXISTS version bigint DEFAULT 0;

/* convert Negligible to None for the severity of the vulnerability record */
UPDATE vulnerability_record SET severity='None' WHERE severity='Negligible';
make/migrations/postgresql/0080_2.5.0_schema.up.sql (new file) | 22
@@ -0,0 +1,22 @@
/* create table of accessory */
CREATE TABLE IF NOT EXISTS artifact_accessory (
id SERIAL PRIMARY KEY NOT NULL,
/*
the artifact id of the accessory itself.
*/
artifact_id bigint,
/*
the subject artifact id of the accessory.
*/
subject_artifact_id bigint,
/*
the type of the accessory, like signature.cosign.
*/
type varchar(256),
size bigint,
digest varchar(1024),
creation_time timestamp default CURRENT_TIMESTAMP,
FOREIGN KEY (artifact_id) REFERENCES artifact(id),
FOREIGN KEY (subject_artifact_id) REFERENCES artifact(id),
CONSTRAINT unique_artifact_accessory UNIQUE (artifact_id, subject_artifact_id)
);
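A minimal sketch of how the new artifact_accessory table ties an accessory (for example a cosign signature) to its subject artifact; the database name, user, and accessory type value are assumptions for illustration only:

# Illustrative query against Harbor's Postgres database after the 2.5.0 migration has run.
psql -U postgres -d registry -c "
SELECT acc.id, acc.type, acc.digest, subject.digest AS subject_digest
FROM artifact_accessory acc
JOIN artifact subject ON subject.id = acc.subject_artifact_id
WHERE acc.type = 'signature.cosign';"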
179
make/photon/chartserver/454.patch
Normal file
179
make/photon/chartserver/454.patch
Normal file
@ -0,0 +1,179 @@
|
||||
From 66cc2635880193ffb1226e3c790b36eef24cfd8b Mon Sep 17 00:00:00 2001
|
||||
From: scnace <scbizu@gmail.com>
|
||||
Date: Mon, 3 May 2021 15:09:44 +0800
|
||||
Subject: [PATCH 1/2] pkg/chartmuseum/server: add tests for cover duplicate
|
||||
index entry cases
|
||||
|
||||
Signed-off-by: scnace <scbizu@gmail.com>
|
||||
---
|
||||
pkg/chartmuseum/server/multitenant/api.go | 9 +++++----
|
||||
.../server/multitenant/server_test.go | 19 +++++++++++++++++++
|
||||
2 files changed, 24 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/pkg/chartmuseum/server/multitenant/api.go b/pkg/chartmuseum/server/multitenant/api.go
|
||||
index afc2ab5c..2b03d5e3 100644
|
||||
--- a/pkg/chartmuseum/server/multitenant/api.go
|
||||
+++ b/pkg/chartmuseum/server/multitenant/api.go
|
||||
@@ -95,15 +95,15 @@ func (server *MultiTenantServer) deleteChartVersion(log cm_logger.LoggingFn, rep
|
||||
return nil
|
||||
}
|
||||
|
||||
-func (server *MultiTenantServer) uploadChartPackage(log cm_logger.LoggingFn, repo string, content []byte, force bool) (string, *HTTPError ){
|
||||
+func (server *MultiTenantServer) uploadChartPackage(log cm_logger.LoggingFn, repo string, content []byte, force bool) (string, *HTTPError) {
|
||||
filename, err := cm_repo.ChartPackageFilenameFromContent(content)
|
||||
if err != nil {
|
||||
- return filename,&HTTPError{http.StatusInternalServerError, err.Error()}
|
||||
+ return filename, &HTTPError{http.StatusInternalServerError, err.Error()}
|
||||
}
|
||||
|
||||
if pathutil.Base(filename) != filename {
|
||||
// Name wants to break out of current directory
|
||||
- return filename,&HTTPError{http.StatusBadRequest, fmt.Sprintf("%s is improperly formatted", filename)}
|
||||
+ return filename, &HTTPError{http.StatusBadRequest, fmt.Sprintf("%s is improperly formatted", filename)}
|
||||
}
|
||||
|
||||
if !server.AllowOverwrite && (!server.AllowForceOverwrite || !force) {
|
||||
@@ -139,7 +139,8 @@ func (server *MultiTenantServer) uploadChartPackage(log cm_logger.LoggingFn, rep
|
||||
)
|
||||
err = server.StorageBackend.PutObject(pathutil.Join(repo, filename), content)
|
||||
if err != nil {
|
||||
- return filename, &HTTPError{http.StatusInternalServerError, err.Error()} }
|
||||
+ return filename, &HTTPError{http.StatusInternalServerError, err.Error()}
|
||||
+ }
|
||||
return filename, nil
|
||||
}
|
||||
|
||||
diff --git a/pkg/chartmuseum/server/multitenant/server_test.go b/pkg/chartmuseum/server/multitenant/server_test.go
|
||||
index 13050e25..138364f8 100644
|
||||
--- a/pkg/chartmuseum/server/multitenant/server_test.go
|
||||
+++ b/pkg/chartmuseum/server/multitenant/server_test.go
|
||||
@@ -339,6 +339,7 @@ func (suite *MultiTenantServerTestSuite) SetupSuite() {
|
||||
AllowOverwrite: true,
|
||||
ChartPostFormFieldName: "chart",
|
||||
ProvPostFormFieldName: "prov",
|
||||
+ CacheInterval: time.Duration(time.Second),
|
||||
})
|
||||
suite.NotNil(server)
|
||||
suite.Nil(err, "no error creating new overwrite server")
|
||||
@@ -627,6 +628,14 @@ func (suite *MultiTenantServerTestSuite) TestDisabledDeleteServer() {
|
||||
suite.Equal(404, res.Status(), "404 DELETE /api/charts/mychart/0.1.0")
|
||||
}
|
||||
|
||||
+func (suite *MultiTenantServerTestSuite) extractRepoEntryFromInternalCache(repo string) *cacheEntry {
|
||||
+ local, ok := suite.OverwriteServer.InternalCacheStore[repo]
|
||||
+ if ok {
|
||||
+ return local
|
||||
+ }
|
||||
+ return nil
|
||||
+}
|
||||
+
|
||||
func (suite *MultiTenantServerTestSuite) TestOverwriteServer() {
|
||||
// Check if files can be overwritten
|
||||
content, err := ioutil.ReadFile(testTarballPath)
|
||||
@@ -638,6 +647,16 @@ func (suite *MultiTenantServerTestSuite) TestOverwriteServer() {
|
||||
res = suite.doRequest("overwrite", "POST", "/api/charts", body, "")
|
||||
suite.Equal(201, res.Status(), "201 POST /api/charts")
|
||||
|
||||
+ {
|
||||
+ // waiting for the emit event
|
||||
+ // the event is transferred via a channel , here do a simple wait for not changing the original structure
|
||||
+ // only for testing purpose
|
||||
+ time.Sleep(time.Second)
|
||||
+ // depth: 0
|
||||
+ e := suite.extractRepoEntryFromInternalCache("")
|
||||
+ suite.Equal(1, len(e.RepoIndex.Entries), "overwrite entries validation")
|
||||
+ }
|
||||
+
|
||||
content, err = ioutil.ReadFile(testProvfilePath)
|
||||
suite.Nil(err, "no error opening test provenance file")
|
||||
body = bytes.NewBuffer(content)
|
||||
|
||||
From cd2e286da8148a7c114cb45867bf5c7b09e29467 Mon Sep 17 00:00:00 2001
|
||||
From: scnace <scbizu@gmail.com>
|
||||
Date: Mon, 3 May 2021 15:42:00 +0800
|
||||
Subject: [PATCH 2/2] pkg/chartmuseum/server/multitenant: fix the bad action
|
||||
type when upload package when overwrite option is set ,index entry addChart
|
||||
should be updateChart under the overwrite cases.
|
||||
|
||||
Signed-off-by: scnace <scbizu@gmail.com>
|
||||
---
|
||||
pkg/chartmuseum/server/multitenant/api.go | 18 +++++++++++++++---
|
||||
pkg/chartmuseum/server/multitenant/handlers.go | 18 +++++++++++++++---
|
||||
2 files changed, 30 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/pkg/chartmuseum/server/multitenant/api.go b/pkg/chartmuseum/server/multitenant/api.go
|
||||
index 2b03d5e3..902ab7c6 100644
|
||||
--- a/pkg/chartmuseum/server/multitenant/api.go
|
||||
+++ b/pkg/chartmuseum/server/multitenant/api.go
|
||||
@@ -106,11 +106,18 @@ func (server *MultiTenantServer) uploadChartPackage(log cm_logger.LoggingFn, rep
|
||||
return filename, &HTTPError{http.StatusBadRequest, fmt.Sprintf("%s is improperly formatted", filename)}
|
||||
}
|
||||
|
||||
- if !server.AllowOverwrite && (!server.AllowForceOverwrite || !force) {
|
||||
- _, err = server.StorageBackend.GetObject(pathutil.Join(repo, filename))
|
||||
- if err == nil {
|
||||
+ // we should ensure that whether chart is existed even if the `overwrite` option is set
|
||||
+ // For `overwrite` option , here will increase one `storage.GetObject` than before ; others should be equalvarant with the previous version.
|
||||
+ var found bool
|
||||
+ _, err = server.StorageBackend.GetObject(pathutil.Join(repo, filename))
|
||||
+ // found
|
||||
+ if err == nil {
|
||||
+ found = true
|
||||
+ // For those no-overwrite servers, return the Conflict error.
|
||||
+ if !server.AllowOverwrite && (!server.AllowForceOverwrite || !force) {
|
||||
return filename, &HTTPError{http.StatusConflict, "file already exists"}
|
||||
}
|
||||
+ // continue with the `overwrite` servers
|
||||
}
|
||||
|
||||
if server.EnforceSemver2 {
|
||||
@@ -141,6 +148,11 @@ func (server *MultiTenantServer) uploadChartPackage(log cm_logger.LoggingFn, rep
|
||||
if err != nil {
|
||||
return filename, &HTTPError{http.StatusInternalServerError, err.Error()}
|
||||
}
|
||||
+ if found {
|
||||
+ // here is a fake conflict error for outside call
|
||||
+ // In order to not add another return `bool` check (API Compatibility)
|
||||
+ return filename, &HTTPError{http.StatusConflict, ""}
|
||||
+ }
|
||||
return filename, nil
|
||||
}
|
||||
|
||||
diff --git a/pkg/chartmuseum/server/multitenant/handlers.go b/pkg/chartmuseum/server/multitenant/handlers.go
|
||||
index 3e1f0602..c6c31b01 100644
|
||||
--- a/pkg/chartmuseum/server/multitenant/handlers.go
|
||||
+++ b/pkg/chartmuseum/server/multitenant/handlers.go
|
||||
@@ -242,10 +242,22 @@ func (server *MultiTenantServer) postPackageRequestHandler(c *gin.Context) {
|
||||
}
|
||||
log := server.Logger.ContextLoggingFn(c)
|
||||
_, force := c.GetQuery("force")
|
||||
+ action := addChart
|
||||
filename, err := server.uploadChartPackage(log, repo, content, force)
|
||||
if err != nil {
|
||||
- c.JSON(err.Status, gin.H{"error": err.Message})
|
||||
- return
|
||||
+ // here should check both err.Status and err.Message
|
||||
+ // The http.StatusConflict status means the chart is existed but overwrite is not sed OR chart is existed and overwrite is set
|
||||
+ // err.Status == http.StatusConflict only denotes for chart is existed now.
|
||||
+ if err.Status == http.StatusConflict {
|
||||
+ if err.Message != "" {
|
||||
+ c.JSON(err.Status, gin.H{"error": err.Message})
|
||||
+ return
|
||||
+ }
|
||||
+ action = updateChart
|
||||
+ } else {
|
||||
+ c.JSON(err.Status, gin.H{"error": err.Message})
|
||||
+ return
|
||||
+ }
|
||||
}
|
||||
|
||||
chart, chartErr := cm_repo.ChartVersionFromStorageObject(cm_storage.Object{
|
||||
@@ -255,7 +267,7 @@ func (server *MultiTenantServer) postPackageRequestHandler(c *gin.Context) {
|
||||
if chartErr != nil {
|
||||
log(cm_logger.ErrorLevel, "cannot get chart from content", zap.Error(chartErr), zap.Binary("content", content))
|
||||
}
|
||||
- server.emitEvent(c, repo, addChart, chart)
|
||||
+ server.emitEvent(c, repo, action, chart)
|
||||
|
||||
c.JSON(201, objectSavedResponse)
|
||||
}
|
234
make/photon/chartserver/492.patch
Normal file
234
make/photon/chartserver/492.patch
Normal file
@ -0,0 +1,234 @@
|
||||
From 5dd7f0370f73cdffa76707e4f1f715ee4e209f3e Mon Sep 17 00:00:00 2001
|
||||
From: DQ <dengq@vmware.com>
|
||||
Date: Fri, 24 Sep 2021 17:56:00 +0000
|
||||
Subject: [PATCH 1/2] Fix duplicate versions for same chart
|
||||
|
||||
* The detailed issue is described in #450
|
||||
* And there is a PR #454 fixed one scenario of this issue
|
||||
* But there is another ocassion in which users upload chart with prov
|
||||
* in this PR is to handle this situation with the way similar with #454
|
||||
|
||||
Signed-off-by: DQ <dengq@vmware.com>
|
||||
---
|
||||
.../server/multitenant/handlers.go | 55 +++++++++++++------
|
||||
.../server/multitenant/server_test.go | 7 +++
|
||||
2 files changed, 46 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/pkg/chartmuseum/server/multitenant/handlers.go b/pkg/chartmuseum/server/multitenant/handlers.go
|
||||
index c6c31b0..a39a00d 100644
|
||||
--- a/pkg/chartmuseum/server/multitenant/handlers.go
|
||||
+++ b/pkg/chartmuseum/server/multitenant/handlers.go
|
||||
@@ -299,8 +299,24 @@ func (server *MultiTenantServer) postPackageAndProvenanceRequestHandler(c *gin.C
|
||||
_, force := c.GetQuery("force")
|
||||
var chartContent []byte
|
||||
var path string
|
||||
+ // action used to determine what operation to emit
|
||||
+ action := addChart
|
||||
cpFiles, status, err := server.getChartAndProvFiles(c.Request, repo, force)
|
||||
- if status != 200 {
|
||||
+ if err != nil {
|
||||
+ c.JSON(status, gin.H{"error": fmt.Sprintf("%s", err)})
|
||||
+ return
|
||||
+ }
|
||||
+ switch status {
|
||||
+ case http.StatusOK:
|
||||
+ case http.StatusConflict:
|
||||
+ if !server.AllowOverwrite && (!server.AllowForceOverwrite || !force) {
|
||||
+ c.JSON(status, gin.H{"error": fmt.Sprintf("%s", fmt.Errorf("chart already exists"))}) // conflict
|
||||
+ return
|
||||
+ }
|
||||
+ log(cm_logger.DebugLevel, "chart already exists, but overwrite is allowed", zap.String("repo", repo))
|
||||
+ // update chart if chart already exists and overwrite is allowed
|
||||
+ action = updateChart
|
||||
+ default:
|
||||
c.JSON(status, gin.H{"error": fmt.Sprintf("%s", err)})
|
||||
return
|
||||
}
|
||||
@@ -309,7 +325,7 @@ func (server *MultiTenantServer) postPackageAndProvenanceRequestHandler(c *gin.C
|
||||
if len(c.Errors) > 0 {
|
||||
return // this is a "request too large"
|
||||
}
|
||||
- c.JSON(400, gin.H{"error": fmt.Sprintf(
|
||||
+ c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf(
|
||||
"no package or provenance file found in form fields %s and %s",
|
||||
server.ChartPostFormFieldName, server.ProvPostFormFieldName),
|
||||
})
|
||||
@@ -332,7 +348,7 @@ func (server *MultiTenantServer) postPackageAndProvenanceRequestHandler(c *gin.C
|
||||
for _, ppf := range storedFiles {
|
||||
server.StorageBackend.DeleteObject(ppf.filename)
|
||||
}
|
||||
- c.JSON(500, gin.H{"error": fmt.Sprintf("%s", err)})
|
||||
+ c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s", err)})
|
||||
return
|
||||
}
|
||||
if ppf.field == defaultFormField {
|
||||
@@ -350,9 +366,9 @@ func (server *MultiTenantServer) postPackageAndProvenanceRequestHandler(c *gin.C
|
||||
log(cm_logger.ErrorLevel, "cannot get chart from content", zap.Error(err), zap.Binary("content", chartContent))
|
||||
}
|
||||
|
||||
- server.emitEvent(c, repo, addChart, chart)
|
||||
+ server.emitEvent(c, repo, action, chart)
|
||||
|
||||
- c.JSON(201, objectSavedResponse)
|
||||
+ c.JSON(http.StatusCreated, objectSavedResponse)
|
||||
}
|
||||
|
||||
func (server *MultiTenantServer) getChartAndProvFiles(req *http.Request, repo string, force bool) (map[string]*chartOrProvenanceFile, int, error) {
|
||||
@@ -368,29 +384,36 @@ func (server *MultiTenantServer) getChartAndProvFiles(req *http.Request, repo st
|
||||
{server.ProvPostFormFieldName, cm_repo.ProvenanceFilenameFromContent},
|
||||
}
|
||||
|
||||
+ validStatusCode := http.StatusOK
|
||||
cpFiles := make(map[string]*chartOrProvenanceFile)
|
||||
for _, ff := range ffp {
|
||||
content, err := extractContentFromRequest(req, ff.field)
|
||||
if err != nil {
|
||||
- return nil, 500, err
|
||||
+ return nil, http.StatusInternalServerError, err
|
||||
}
|
||||
if content == nil {
|
||||
continue
|
||||
}
|
||||
filename, err := ff.fn(content)
|
||||
if err != nil {
|
||||
- return nil, 400, err
|
||||
+ return nil, http.StatusBadRequest, err
|
||||
}
|
||||
if _, ok := cpFiles[filename]; ok {
|
||||
continue
|
||||
}
|
||||
- if status, err := server.validateChartOrProv(repo, filename, force); err != nil {
|
||||
+ status, err := server.validateChartOrProv(repo, filename, force)
|
||||
+ if err != nil {
|
||||
return nil, status, err
|
||||
}
|
||||
+ // return conflict status code if the file already exists
|
||||
+ if status == http.StatusConflict && validStatusCode != http.StatusConflict {
|
||||
+ validStatusCode = status
|
||||
+ }
|
||||
cpFiles[filename] = &chartOrProvenanceFile{filename, content, ff.field}
|
||||
}
|
||||
|
||||
- return cpFiles, 200, nil
|
||||
+ // validState code can be 200 or 409. Returning 409 means that the chart already exists
|
||||
+ return cpFiles, validStatusCode, nil
|
||||
}
|
||||
|
||||
func extractContentFromRequest(req *http.Request, field string) ([]byte, error) {
|
||||
@@ -408,7 +431,7 @@ func extractContentFromRequest(req *http.Request, field string) ([]byte, error)
|
||||
|
||||
func (server *MultiTenantServer) validateChartOrProv(repo, filename string, force bool) (int, error) {
|
||||
if pathutil.Base(filename) != filename {
|
||||
- return 400, fmt.Errorf("%s is improperly formatted", filename) // Name wants to break out of current directory
|
||||
+ return http.StatusBadRequest, fmt.Errorf("%s is improperly formatted", filename) // Name wants to break out of current directory
|
||||
}
|
||||
|
||||
var f string
|
||||
@@ -417,11 +440,11 @@ func (server *MultiTenantServer) validateChartOrProv(repo, filename string, forc
|
||||
} else {
|
||||
f = repo + "/" + filename
|
||||
}
|
||||
- if !server.AllowOverwrite && (!server.AllowForceOverwrite || !force) {
|
||||
- _, err := server.StorageBackend.GetObject(f)
|
||||
- if err == nil {
|
||||
- return 409, fmt.Errorf("%s already exists", f) // conflict
|
||||
- }
|
||||
+ // conflict does not mean the file is invalid.
|
||||
+ // for example, when overwite is allowed, it's valid
|
||||
+ // so that the client can decide what to do and here we just return conflict with no error
|
||||
+ if _, err := server.StorageBackend.GetObject(f); err == nil {
|
||||
+ return http.StatusConflict, nil
|
||||
}
|
||||
- return 200, nil
|
||||
+ return http.StatusOK, nil
|
||||
}
|
||||
diff --git a/pkg/chartmuseum/server/multitenant/server_test.go b/pkg/chartmuseum/server/multitenant/server_test.go
|
||||
index 138364f..477f349 100644
|
||||
--- a/pkg/chartmuseum/server/multitenant/server_test.go
|
||||
+++ b/pkg/chartmuseum/server/multitenant/server_test.go
|
||||
@@ -672,6 +672,13 @@ func (suite *MultiTenantServerTestSuite) TestOverwriteServer() {
|
||||
buf, w = suite.getBodyWithMultipartFormFiles([]string{"chart", "prov"}, []string{testTarballPath, testProvfilePath})
|
||||
res = suite.doRequest("overwrite", "POST", "/api/charts", buf, w.FormDataContentType())
|
||||
suite.Equal(201, res.Status(), "201 POST /api/charts")
|
||||
+ {
|
||||
+ // the same as chart only case above
|
||||
+ time.Sleep(time.Second)
|
||||
+ // depth: 0
|
||||
+ e := suite.extractRepoEntryFromInternalCache("")
|
||||
+ suite.Equal(1, len(e.RepoIndex.Entries), "overwrite entries validation")
|
||||
+ }
|
||||
}
|
||||
|
||||
func (suite *MultiTenantServerTestSuite) TestBadChartUpload() {
|
||||
|
||||
From 1ecdaa5811178f4d4d6d1cc8077c354cc8f5859f Mon Sep 17 00:00:00 2001
|
||||
From: DQ <dengq@vmware.com>
|
||||
Date: Thu, 30 Sep 2021 13:54:30 +0000
|
||||
Subject: [PATCH 2/2] Enhance: optimize loop in `getChartAndProvFiles`
|
||||
|
||||
* If conflict, it didn't need to do the left logic, just return the file
|
||||
* move out file format check logic out of `validateChartOrProv`
|
||||
* these changes are discussed in https://github.com/helm/chartmuseum/pull/492#discussion_r716032288
|
||||
|
||||
Signed-off-by: DQ <dengq@vmware.com>
|
||||
---
|
||||
.../server/multitenant/handlers.go | 22 ++++++++++++-------
|
||||
1 file changed, 14 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/pkg/chartmuseum/server/multitenant/handlers.go b/pkg/chartmuseum/server/multitenant/handlers.go
|
||||
index a39a00d..fbaf450 100644
|
||||
--- a/pkg/chartmuseum/server/multitenant/handlers.go
|
||||
+++ b/pkg/chartmuseum/server/multitenant/handlers.go
|
||||
@@ -384,7 +384,7 @@ func (server *MultiTenantServer) getChartAndProvFiles(req *http.Request, repo st
|
||||
{server.ProvPostFormFieldName, cm_repo.ProvenanceFilenameFromContent},
|
||||
}
|
||||
|
||||
- validStatusCode := http.StatusOK
|
||||
+ validReturnStatusCode := http.StatusOK
|
||||
cpFiles := make(map[string]*chartOrProvenanceFile)
|
||||
for _, ff := range ffp {
|
||||
content, err := extractContentFromRequest(req, ff.field)
|
||||
@@ -401,19 +401,29 @@ func (server *MultiTenantServer) getChartAndProvFiles(req *http.Request, repo st
|
||||
if _, ok := cpFiles[filename]; ok {
|
||||
continue
|
||||
}
|
||||
+ // if the file already exists, we don't need to validate it again
|
||||
+ if validReturnStatusCode == http.StatusConflict {
|
||||
+ cpFiles[filename] = &chartOrProvenanceFile{filename, content, ff.field}
|
||||
+ continue
|
||||
+ }
|
||||
+ // check filename
|
||||
+ if pathutil.Base(filename) != filename {
|
||||
+ return nil, http.StatusBadRequest, fmt.Errorf("%s is improperly formatted", filename) // Name wants to break out of current directory
|
||||
+ }
|
||||
+ // check existence
|
||||
status, err := server.validateChartOrProv(repo, filename, force)
|
||||
if err != nil {
|
||||
return nil, status, err
|
||||
}
|
||||
// return conflict status code if the file already exists
|
||||
- if status == http.StatusConflict && validStatusCode != http.StatusConflict {
|
||||
- validStatusCode = status
|
||||
+ if status == http.StatusConflict {
|
||||
+ validReturnStatusCode = status
|
||||
}
|
||||
cpFiles[filename] = &chartOrProvenanceFile{filename, content, ff.field}
|
||||
}
|
||||
|
||||
// validState code can be 200 or 409. Returning 409 means that the chart already exists
|
||||
- return cpFiles, validStatusCode, nil
|
||||
+ return cpFiles, validReturnStatusCode, nil
|
||||
}
|
||||
|
||||
func extractContentFromRequest(req *http.Request, field string) ([]byte, error) {
|
||||
@@ -430,10 +440,6 @@ func extractContentFromRequest(req *http.Request, field string) ([]byte, error)
|
||||
}
|
||||
|
||||
func (server *MultiTenantServer) validateChartOrProv(repo, filename string, force bool) (int, error) {
|
||||
- if pathutil.Base(filename) != filename {
|
||||
- return http.StatusBadRequest, fmt.Errorf("%s is improperly formatted", filename) // Name wants to break out of current directory
|
||||
- }
|
||||
-
|
||||
var f string
|
||||
if repo == "" {
|
||||
f = filename
|
@@ -4,7 +4,7 @@ set +e

usage(){
echo "Usage: builder <golang image:version> <code path> <code release tag> <main.go path> <binary name>"
echo "e.g: builder golang:1.16.5 github.com/helm/chartmuseum v0.12.0 cmd/chartmuseum chartm"
echo "e.g: builder golang:1.17.2 github.com/helm/chartmuseum v0.12.0 cmd/chartmuseum chartm"
exit 1
}
@@ -5,6 +5,4 @@ set -e
/home/chart/install_cert.sh

#Start the server process
/home/chart/chartm

set +e
exec /home/chart/chartm
@@ -4,4 +4,4 @@ set -e

/harbor/install_cert.sh

/harbor/harbor_core
exec /harbor/harbor_core
@@ -12,7 +12,7 @@ function checkdep {
exit 1
fi

if ! bzip2 --version &> /dev/null
if ! [ -x "$(command -v bzip2)" ]
then
echo "Need to install bzip2 first and run this script again."
exit 1
@@ -4,4 +4,4 @@ set -e

/harbor/install_cert.sh

/harbor/harbor_exporter
exec /harbor/harbor_exporter
@@ -4,4 +4,4 @@ set -e

/harbor/install_cert.sh

/harbor/harbor_jobservice -c /etc/jobservice/config.yml
exec /harbor/harbor_jobservice -c /etc/jobservice/config.yml
@@ -2,5 +2,6 @@

# run the logrotate with user 10000, the state file "/var/lib/logrotate/logrotate.status"
# is specified to avoid the permission error
cd /
sudo -u \#10000 -E /usr/sbin/logrotate -s /var/lib/logrotate/logrotate.status /etc/logrotate.conf
exit 0
exit 0
@@ -1,4 +1,4 @@
import os, sys, importlib, shutil, glob
import os, sys, shutil, glob
from packaging import version

import click
@@ -10,7 +10,7 @@ from migrations import accept_versions
@click.command()
@click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
@click.option('-o', '--output', default='', help="the path of output config file")
@click.option('-t', '--target', default='2.3.0', help="target version of input path")
@click.option('-t', '--target', default='2.4.0', help="target version of input path")
def migrate(input_, output, target):
"""
migrate command will migrate config file style to specific version
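A hedged invocation sketch for the migrate command defined above; the entry script name and file paths are assumptions, while the -i/-o/-t options come from the click definitions shown:

# Illustrative: rewrite an existing harbor.yml into the 2.4.0 layout.
python main.py migrate -i /config/harbor.yml -o /config/harbor.yml.new -t 2.4.0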
@@ -2,4 +2,4 @@ import os

MIGRATION_BASE_DIR = os.path.dirname(__file__)

accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0'}
accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0'}
make/photon/prepare/migrations/version_2_4_0/__init__.py (new file) | 20
@@ -0,0 +1,20 @@
import os
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from utils.migration import read_conf

revision = '2.4.0'
down_revisions = ['2.3.0']

def migrate(input_cfg, output_cfg):
    current_dir = os.path.dirname(__file__)
    tpl = Environment(
        loader=FileSystemLoader(current_dir),
        undefined=StrictUndefined,
        trim_blocks=True,
        lstrip_blocks=True
    ).get_template('harbor.yml.jinja')

    config_dict = read_conf(input_cfg)

    with open(output_cfg, 'w') as f:
        f.write(tpl.render(**config_dict))
make/photon/prepare/migrations/version_2_4_0/harbor.yml.jinja (new file) | 520
@ -0,0 +1,520 @@
|
||||
# Configuration file of Harbor
|
||||
|
||||
# The IP address or hostname to access admin UI and registry service.
|
||||
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: {{ hostname }}
|
||||
|
||||
# http related config
|
||||
{% if http is defined %}
|
||||
http:
|
||||
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
port: {{ http.port }}
|
||||
{% else %}
|
||||
# http:
|
||||
# # port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
# port: 80
|
||||
{% endif %}
|
||||
|
||||
{% if https is defined %}
|
||||
# https related config
|
||||
https:
|
||||
# https port for harbor, default is 443
|
||||
port: {{ https.port }}
|
||||
# The path of cert and key files for nginx
|
||||
certificate: {{ https.certificate }}
|
||||
private_key: {{ https.private_key }}
|
||||
{% else %}
|
||||
# https related config
|
||||
# https:
|
||||
# # https port for harbor, default is 443
|
||||
# port: 443
|
||||
# # The path of cert and key files for nginx
|
||||
# certificate: /your/certificate/path
|
||||
# private_key: /your/private/key/path
|
||||
{% endif %}
|
||||
|
||||
{% if internal_tls is defined %}
|
||||
# Uncomment following will enable tls communication between all harbor components
|
||||
internal_tls:
|
||||
# set enabled to true means internal tls is enabled
|
||||
enabled: {{ internal_tls.enabled | lower }}
|
||||
# put your cert and key files on dir
|
||||
dir: {{ internal_tls.dir }}
|
||||
{% else %}
|
||||
# internal_tls:
|
||||
# # set enabled to true means internal tls is enabled
|
||||
# enabled: true
|
||||
# # put your cert and key files on dir
|
||||
# dir: /etc/harbor/tls/internal
|
||||
{% endif %}
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it enabled the hostname will no longer used
|
||||
{% if external_url is defined %}
|
||||
external_url: {{ external_url }}
|
||||
{% else %}
|
||||
# external_url: https://reg.mydomain.com:8433
|
||||
{% endif %}
|
||||
|
||||
# The initial password of Harbor admin
|
||||
# It only works in first time to install harbor
|
||||
# Remember Change the admin password from UI after launching Harbor.
|
||||
{% if harbor_admin_password is defined %}
|
||||
harbor_admin_password: {{ harbor_admin_password }}
|
||||
{% else %}
|
||||
harbor_admin_password: Harbor12345
|
||||
{% endif %}
|
||||
|
||||
# Harbor DB configuration
|
||||
database:
|
||||
{% if database is defined %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: {{ database.password}}
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: {{ database.max_idle_conns }}
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||
max_open_conns: {{ database.max_open_conns }}
|
||||
{% else %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: root123
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: 100
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||
max_open_conns: 900
|
||||
{% endif %}
|
||||
|
||||
{% if data_volume is defined %}
|
||||
# The default data volume
|
||||
data_volume: {{ data_volume }}
|
||||
{% else %}
|
||||
# The default data volume
|
||||
data_volume: /data
|
||||
{% endif %}
|
||||
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
{% if storage_service is defined %}
|
||||
storage_service:
|
||||
{% for key, value in storage_service.items() %}
|
||||
{% if key == 'ca_bundle' %}
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
ca_bundle: {{ value if value is not none else '' }}
|
||||
{% elif key == 'redirect' %}
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
redirect:
|
||||
disabled: {{ value.disabled }}
|
||||
{% else %}
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
|
||||
{{ key }}:
|
||||
{% for k, v in value.items() %}
|
||||
{{ k }}: {{ v if v is not none else '' }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
# storage_service:
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts an internal storage with a self-signed certificate.
|
||||
# ca_bundle:
|
||||
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer to https://docs.docker.com/registry/configuration/
|
||||
# filesystem:
|
||||
# maxthreads: 100
|
||||
# # set disabled to true when you want to disable registry redirect
|
||||
# redirect:
|
||||
# disabled: false
|
||||
{% endif %}
|
||||
|
||||
# Trivy configuration
|
||||
#
|
||||
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
|
||||
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
|
||||
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
|
||||
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
|
||||
# 12 hours and published as a new release to GitHub.
|
||||
{% if trivy is defined %}
|
||||
trivy:
|
||||
# ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||
{% if trivy.ignore_unfixed is defined %}
|
||||
ignore_unfixed: {{ trivy.ignore_unfixed | lower }}
|
||||
{% else %}
|
||||
ignore_unfixed: false
|
||||
{% endif %}
|
||||
# timeout The duration to wait for scan completion
|
||||
{% if trivy.timeout is defined %}
|
||||
timeout: {{ trivy.timeout }}
|
||||
{% else %}
|
||||
timeout: 5m0s
|
||||
{% endif %}
|
||||
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||
#
|
||||
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
{% if trivy.skip_update is defined %}
|
||||
skip_update: {{ trivy.skip_update | lower }}
|
||||
{% else %}
|
||||
skip_update: false
|
||||
{% endif %}
|
||||
#
|
||||
# insecure The flag to skip verifying registry certificate
|
||||
{% if trivy.insecure is defined %}
|
||||
insecure: {{ trivy.insecure | lower }}
|
||||
{% else %}
|
||||
insecure: false
|
||||
{% endif %}
|
||||
# github_token The GitHub access token to download Trivy DB
|
||||
#
|
||||
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||
# https://developer.github.com/v3/#rate-limiting
|
||||
#
|
||||
# You can create a GitHub token by following the instructions in
|
||||
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||
#
|
||||
{% if trivy.github_token is defined %}
|
||||
github_token: {{ trivy.github_token }}
|
||||
{% else %}
|
||||
# github_token: xxx
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# trivy:
|
||||
# # ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||
# ignore_unfixed: false
|
||||
# # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||
# #
|
||||
# # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||
# # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||
# # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
# skip_update: false
|
||||
# #
|
||||
# # insecure The flag to skip verifying registry certificate
|
||||
# insecure: false
|
||||
# # github_token The GitHub access token to download Trivy DB
|
||||
# #
|
||||
# # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
# # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||
# # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||
# # https://developer.github.com/v3/#rate-limiting
|
||||
# #
|
||||
# # You can create a GitHub token by following the instructions in
|
||||
# # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||
# #
|
||||
# # github_token: xxx
|
||||
{% endif %}
|
||||
|
||||
jobservice:
|
||||
# Maximum number of job workers in job service
|
||||
{% if jobservice is defined %}
|
||||
max_job_workers: {{ jobservice.max_job_workers }}
|
||||
{% else %}
|
||||
max_job_workers: 10
|
||||
{% endif %}
|
||||
|
||||
notification:
|
||||
# Maximum retry count for webhook job
|
||||
{% if notification is defined %}
|
||||
webhook_job_max_retry: {{ notification.webhook_job_max_retry}}
|
||||
{% else %}
|
||||
webhook_job_max_retry: 10
|
||||
{% endif %}
|
||||
|
||||
{% if chart is defined %}
|
||||
chart:
|
||||
# Change the value of absolute_url to enabled to enable absolute urls in chart
|
||||
absolute_url: {{ chart.absolute_url if chart.absolute_url == 'enabled' else 'disabled' }}
|
||||
{% else %}
|
||||
chart:
|
||||
# Change the value of absolute_url to enabled to enable absolute urls in chart
|
||||
absolute_url: disabled
|
||||
{% endif %}
|
||||
|
||||
# Log configurations
|
||||
log:
|
||||
# options are debug, info, warning, error, fatal
|
||||
{% if log is defined %}
|
||||
level: {{ log.level }}
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: {{ log.local.rotate_count }}
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: {{ log.local.rotate_size }}
|
||||
# The directory on your host that stores the logs
|
||||
location: {{ log.local.location }}
|
||||
{% if log.external_endpoint is defined %}
|
||||
external_endpoint:
|
||||
# protocol used to transmit logs to the external endpoint, options are tcp or udp
|
||||
protocol: {{ log.external_endpoint.protocol }}
|
||||
# The host of external endpoint
|
||||
host: {{ log.external_endpoint.host }}
|
||||
# Port of external endpoint
|
||||
port: {{ log.external_endpoint.port }}
|
||||
{% else %}
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit logs to the external endpoint, options are tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
||||
{% else %}
|
||||
level: info
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: 50
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: 200M
|
||||
# The directory on your host that stores the logs
|
||||
location: /var/log/harbor
|
||||
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit logs to the external endpoint, options are tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
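The size suffixes described above (k, M, G) are handed to logrotate as-is; the helper below is only an illustrative sketch of how they translate into bytes and is not part of Harbor.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// rotateSizeBytes converts values such as "100", "100k", "100M" or "100G"
// into a byte count, mirroring the suffix rules described above.
func rotateSizeBytes(s string) (int64, error) {
	mult := int64(1)
	switch {
	case strings.HasSuffix(s, "k"):
		mult, s = 1024, strings.TrimSuffix(s, "k")
	case strings.HasSuffix(s, "M"):
		mult, s = 1024*1024, strings.TrimSuffix(s, "M")
	case strings.HasSuffix(s, "G"):
		mult, s = 1024*1024*1024, strings.TrimSuffix(s, "G")
	}
	n, err := strconv.ParseInt(strings.TrimSpace(s), 10, 64)
	if err != nil {
		return 0, err
	}
	return n * mult, nil
}

func main() {
	n, _ := rotateSizeBytes("200M")
	fmt.Println(n) // 209715200
}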
||||
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 2.4.0
|
||||
{% if external_database is defined %}
|
||||
# Uncomment external_database if using external database.
|
||||
external_database:
|
||||
harbor:
|
||||
host: {{ external_database.harbor.host }}
|
||||
port: {{ external_database.harbor.port }}
|
||||
db_name: {{ external_database.harbor.db_name }}
|
||||
username: {{ external_database.harbor.username }}
|
||||
password: {{ external_database.harbor.password }}
|
||||
ssl_mode: {{ external_database.harbor.ssl_mode }}
|
||||
max_idle_conns: {{ external_database.harbor.max_idle_conns}}
|
||||
max_open_conns: {{ external_database.harbor.max_open_conns}}
|
||||
notary_signer:
|
||||
host: {{ external_database.notary_signer.host }}
|
||||
port: {{ external_database.notary_signer.port }}
|
||||
db_name: {{external_database.notary_signer.db_name }}
|
||||
username: {{ external_database.notary_signer.username }}
|
||||
password: {{ external_database.notary_signer.password }}
|
||||
ssl_mode: {{ external_database.notary_signer.ssl_mode }}
|
||||
notary_server:
|
||||
host: {{ external_database.notary_server.host }}
|
||||
port: {{ external_database.notary_server.port }}
|
||||
db_name: {{ external_database.notary_server.db_name }}
|
||||
username: {{ external_database.notary_server.username }}
|
||||
password: {{ external_database.notary_server.password }}
|
||||
ssl_mode: {{ external_database.notary_server.ssl_mode }}
|
||||
{% else %}
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
# harbor:
|
||||
# host: harbor_db_host
|
||||
# port: harbor_db_port
|
||||
# db_name: harbor_db_name
|
||||
# username: harbor_db_username
|
||||
# password: harbor_db_password
|
||||
# ssl_mode: disable
|
||||
# max_idle_conns: 2
|
||||
# max_open_conns: 0
|
||||
# notary_signer:
|
||||
# host: notary_signer_db_host
|
||||
# port: notary_signer_db_port
|
||||
# db_name: notary_signer_db_name
|
||||
# username: notary_signer_db_username
|
||||
# password: notary_signer_db_password
|
||||
# ssl_mode: disable
|
||||
# notary_server:
|
||||
# host: notary_server_db_host
|
||||
# port: notary_server_db_port
|
||||
# db_name: notary_server_db_name
|
||||
# username: notary_server_db_username
|
||||
# password: notary_server_db_password
|
||||
# ssl_mode: disable
|
||||
{% endif %}
|
||||
|
||||
{% if external_redis is defined %}
|
||||
external_redis:
|
||||
# support redis, redis+sentinel
|
||||
# host for redis: <host_redis>:<port_redis>
|
||||
# host for redis+sentinel:
|
||||
# <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||
host: {{ external_redis.host }}
|
||||
password: {{ external_redis.password }}
|
||||
# sentinel_master_set must be set to support redis+sentinel
|
||||
#sentinel_master_set:
|
||||
# db_index 0 is for core, it's unchangeable
|
||||
registry_db_index: {{ external_redis.registry_db_index }}
|
||||
jobservice_db_index: {{ external_redis.jobservice_db_index }}
|
||||
chartmuseum_db_index: {{ external_redis.chartmuseum_db_index }}
|
||||
trivy_db_index: 5
|
||||
idle_timeout_seconds: 30
|
||||
{% else %}
|
||||
# Uncomment external_redis if using an external Redis server
|
||||
# external_redis:
|
||||
# # support redis, redis+sentinel
|
||||
# # host for redis: <host_redis>:<port_redis>
|
||||
# # host for redis+sentinel:
|
||||
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||
# host: redis:6379
|
||||
# password:
|
||||
# # sentinel_master_set must be set to support redis+sentinel
|
||||
# #sentinel_master_set:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# registry_db_index: 1
|
||||
# jobservice_db_index: 2
|
||||
# chartmuseum_db_index: 3
|
||||
# trivy_db_index: 5
|
||||
# idle_timeout_seconds: 30
|
||||
{% endif %}
|
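The host field above packs either a single redis address or a comma-separated list of sentinel addresses into one string. The sketch below splits the redis+sentinel form into host/port pairs; it is illustrative only and is not Harbor's actual parser.

package main

import (
	"fmt"
	"log"
	"net"
	"strings"
)

func main() {
	// redis+sentinel form from the comment above
	raw := "sentinel1:26379,sentinel2:26379,sentinel3:26379"
	for _, addr := range strings.Split(raw, ",") {
		host, port, err := net.SplitHostPort(strings.TrimSpace(addr))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("sentinel host=%s port=%s\n", host, port)
	}
}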
||||
|
||||
{% if uaa is defined %}
|
||||
# Uncomment uaa for trusting the certificate of a uaa instance that is hosted with a self-signed cert.
|
||||
uaa:
|
||||
ca_file: {{ uaa.ca_file }}
|
||||
{% else %}
|
||||
# Uncomment uaa for trusting the certificate of a uaa instance that is hosted with a self-signed cert.
|
||||
# uaa:
|
||||
# ca_file: /path/to/ca
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Global proxy
|
||||
# Config http proxy for components, e.g. http://my.proxy.com:3128
|
||||
# Components don't need to connect to each other via the http proxy.
|
||||
# Remove a component from the `components` array if you want to disable the proxy
|
||||
# for it. If you want to use the proxy for replication, you MUST enable the proxy
|
||||
# for core and jobservice, and set `http_proxy` and `https_proxy`.
|
||||
# Add a domain to the `no_proxy` field when you want to disable the proxy
|
||||
# for some special registry.
|
||||
{% if proxy is defined %}
|
||||
proxy:
|
||||
http_proxy: {{ proxy.http_proxy or ''}}
|
||||
https_proxy: {{ proxy.https_proxy or ''}}
|
||||
no_proxy: {{ proxy.no_proxy or ''}}
|
||||
{% if proxy.components is defined %}
|
||||
components:
|
||||
{% for component in proxy.components %}
|
||||
{% if component != 'clair' %}
|
||||
- {{component}}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
proxy:
|
||||
http_proxy:
|
||||
https_proxy:
|
||||
no_proxy:
|
||||
components:
|
||||
- core
|
||||
- jobservice
|
||||
- trivy
|
||||
{% endif %}
|
||||
|
||||
{% if metric is defined %}
|
||||
metric:
|
||||
enabled: {{ metric.enabled }}
|
||||
port: {{ metric.port }}
|
||||
path: {{ metric.path }}
|
||||
{% else %}
|
||||
# metric:
|
||||
# enabled: false
|
||||
# port: 9090
|
||||
# path: /metric
|
||||
{% endif %}
|
||||
|
||||
# Trace related config
|
||||
# only one trace provider (jaeger or otel) can be enabled at the same time,
|
||||
# and when using jaeger as the provider, it can only be enabled in either agent mode or collector mode.
|
||||
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
|
||||
# if using jaeger agent mode, uncomment agent_host and agent_port
|
||||
{% if trace is defined %}
|
||||
trace:
|
||||
enabled: {{ trace.enabled | lower}}
|
||||
sample_rate: {{ trace.sample_rate }}
|
||||
# # namespace used to differentiate different harbor services
|
||||
{% if trace.namespace is defined %}
|
||||
namespace: {{ trace.namespace }}
|
||||
{% else %}
|
||||
# namespace:
|
||||
{% endif %}
|
||||
# # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
{% if trace.attributes is defined%}
|
||||
attributes:
|
||||
{% for name, value in trace.attributes.items() %}
|
||||
{{name}}: {{value}}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# attributes:
|
||||
# application: harbor
|
||||
{% endif %}
|
||||
{% if trace.jaeger is defined%}
|
||||
jaeger:
|
||||
endpoint: {{trace.jaeger.endpoint or '' }}
|
||||
username: {{trace.jaeger.username or ''}}
|
||||
password: {{trace.jaeger.password or ''}}
|
||||
agent_host: {{trace.jaeger.agent_host or ''}}
|
||||
agent_port: {{trace.jaeger.agent_port or ''}}
|
||||
{% else %}
|
||||
# jaeger:
|
||||
# endpoint:
|
||||
# username:
|
||||
# password:
|
||||
# agent_host:
|
||||
# agent_port:
|
||||
{% endif %}
|
||||
{% if trace.otel is defined %}
|
||||
otel:
|
||||
endpoint: {{trace.otel.endpoint or '' }}
|
||||
url_path: {{trace.otel.url_path or '' }}
|
||||
compression: {{trace.otel.compression | lower }}
|
||||
insecure: {{trace.otel.insecure | lower }}
|
||||
timeout: {{trace.otel.timeout or '' }}
|
||||
{% else %}
|
||||
# otel:
|
||||
# endpoint: hostname:4318
|
||||
# url_path: /v1/traces
|
||||
# compression: false
|
||||
# insecure: true
|
||||
# timeout: 10s
|
||||
{% endif%}
|
||||
{% else %}
|
||||
# trace:
|
||||
# enabled: true
|
||||
# # set sample_rate to 1 if you want to sample 100% of trace data; set 0.5 if you want to sample 50% of trace data, and so forth
|
||||
# sample_rate: 1
|
||||
# # # namespace used to differentiate different harbor services
|
||||
# # namespace:
|
||||
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
# # attributes:
|
||||
# # application: harbor
|
||||
# # jaeger:
|
||||
# # endpoint: http://hostname:14268/api/traces
|
||||
# # username:
|
||||
# # password:
|
||||
# # agent_host: hostname
|
||||
# # agent_port: 6832
|
||||
# # otel:
|
||||
# # endpoint: hostname:4318
|
||||
# # url_path: /v1/traces
|
||||
# # compression: false
|
||||
# # insecure: true
|
||||
# # timeout: 10s
|
||||
{% endif %}
|
@ -103,11 +103,10 @@ class InternalTLS:
|
||||
return
|
||||
raise Exception('cert file {} should include SAN'.format(filename))
|
||||
|
||||
|
||||
def validate(self) -> bool:
|
||||
def validate(self):
|
||||
if not self.enabled:
|
||||
# pass the validation if not enabled
|
||||
return True
|
||||
return
|
||||
|
||||
if not internal_tls_dir.exists():
|
||||
raise Exception('Internal dir for tls {} not exist'.format(internal_tls_dir))
|
||||
@ -115,8 +114,6 @@ class InternalTLS:
|
||||
for filename in self.required_filenames:
|
||||
self._check(filename)
|
||||
|
||||
return True
|
||||
|
||||
def prepare(self):
|
||||
"""
|
||||
Prepare moves certs in tls file to data volume with correct permission.
|
||||
@ -140,7 +137,6 @@ class InternalTLS:
|
||||
else:
|
||||
os.chown(file, DEFAULT_UID, DEFAULT_GID)
|
||||
|
||||
|
||||
class Metric:
|
||||
def __init__(self, enabled: bool = False, port: int = 8080, path: str = "metrics" ):
|
||||
self.enabled = enabled
|
||||
@ -149,4 +145,62 @@ class Metric:
|
||||
|
||||
def validate(self):
|
||||
if not port_number_valid(self.port):
|
||||
raise Exception('Port number in metrics is not valid')
|
||||
|
||||
|
||||
class JaegerExporter:
|
||||
def __init__(self, config: dict):
|
||||
if not config:
|
||||
self.enabled = False
|
||||
return
|
||||
self.enabled = True
|
||||
self.endpoint = config.get('endpoint')
|
||||
self.username = config.get('username')
|
||||
self.password = config.get('password')
|
||||
self.agent_host = config.get('agent_host')
|
||||
self.agent_port = config.get('agent_port')
|
||||
|
||||
def validate(self):
|
||||
if not self.endpoint and not self.agent_host:
|
||||
raise Exception('Jaeger Collector Endpoint or Agent host not set, must set one')
|
||||
if self.endpoint and self.agent_host:
|
||||
raise Exception('Jaeger Collector Endpoint and Agent host both set, only can set one')
|
||||
|
||||
class OtelExporter:
|
||||
def __init__(self, config: dict):
|
||||
if not config:
|
||||
self.enabled = False
|
||||
return
|
||||
self.enabled = True
|
||||
self.endpoint = config.get('endpoint')
|
||||
self.url_path = config.get('url_path')
|
||||
self.compression = config.get('compression') or False
|
||||
self.insecure = config.get('insecure') or False
|
||||
self.timeout = config.get('timeout') or '10s'
|
||||
|
||||
def validate(self):
|
||||
if not self.endpoint:
|
||||
raise Exception('Trace endpoint not set')
|
||||
if not self.url_path:
|
||||
raise Exception('Trace url path not set')
|
||||
|
||||
class Trace:
|
||||
def __init__(self, config: dict):
|
||||
self.enabled = config.get('enabled') or False
|
||||
self.sample_rate = config.get('sample_rate', 1)
|
||||
self.namespace = config.get('namespace') or ''
|
||||
self.jaeger = JaegerExporter(config.get('jaeger'))
|
||||
self.otel = OtelExporter(config.get('otel'))
|
||||
self.attributes = config.get('attributes') or {}
|
||||
|
||||
def validate(self):
|
||||
if not self.enabled:
|
||||
return
|
||||
if not self.jaeger.enabled and not self.otel.enabled:
|
||||
raise Exception('Trace enabled but no trace exporter set')
|
||||
elif self.jaeger.enabled and self.otel.enabled:
|
||||
raise Exception('Only can have one trace exporter at a time')
|
||||
elif self.jaeger.enabled:
|
||||
self.jaeger.validate()
|
||||
elif self.otel.enabled:
|
||||
self.otel.validate()
|
||||
|
@ -62,3 +62,25 @@ METRIC_PORT={{ metric.port }}
|
||||
METRIC_NAMESPACE=harbor
|
||||
METRIC_SUBSYSTEM=core
|
||||
{% endif %}
|
||||
|
||||
{% if trace.enabled %}
|
||||
TRACE_ENABLED=true
|
||||
TRACE_SERVICE_NAME=harbor-core
|
||||
TRACE_SAMPLE_RATE={{ trace.sample_rate }}
|
||||
TRACE_NAMESPACE={{ trace.namespace }}
|
||||
TRACE_ATTRIBUTES={{ trace.attributes | to_json | safe }}
|
||||
{% if trace.jaeger.enabled %}
|
||||
TRACE_JAEGER_ENDPOINT={{ trace.jaeger.endpoint if trace.jaeger.endpoint else '' }}
|
||||
TRACE_JAEGER_USERNAME={{ trace.jaeger.username if trace.jaeger.username else '' }}
|
||||
TRACE_JAEGER_PASSWORD={{ trace.jaeger.password if trace.jaeger.password else '' }}
|
||||
TRACE_JAEGER_AGENT_HOSTNAME={{ trace.jaeger.agent_host if trace.jaeger.agent_host else '' }}
|
||||
TRACE_JAEGER_AGENT_PORT={{ trace.jaeger.agent_port if trace.jaeger.agent_port else '' }}
|
||||
{% endif %}
|
||||
{%if trace.otel.enabled %}
|
||||
TRACE_OTEL_ENDPOINT={{ trace.otel.endpoint }}
|
||||
TRACE_OTEL_URL_PATH={{ trace.otel.url_path if trace.otel.url_path else '' }}
|
||||
TRACE_OTEL_COMPRESSION={{ trace.otel.compression }}
|
||||
TRACE_OTEL_TIMEOUT={{ trace.otel.timeout }}
|
||||
TRACE_OTEL_INSECURE={{ trace.otel.insecure }}
|
||||
{% endif %}
|
||||
{% endif %}
|
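The variables rendered above are read by the Go services at startup. A minimal sketch of the consuming side is shown below; only the variable names are taken from the template, and the exact parsing in Harbor may differ.

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	if os.Getenv("TRACE_ENABLED") != "true" {
		return // tracing disabled, nothing to configure
	}
	rate, err := strconv.ParseFloat(os.Getenv("TRACE_SAMPLE_RATE"), 64)
	if err != nil {
		rate = 1 // fall back to sampling everything
	}
	endpoint := os.Getenv("TRACE_OTEL_ENDPOINT") // empty when the jaeger exporter is used instead
	fmt.Printf("service=%s sample_rate=%.2f otel_endpoint=%q\n",
		os.Getenv("TRACE_SERVICE_NAME"), rate, endpoint)
}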
||||
|
@ -4,7 +4,6 @@ services:
|
||||
image: goharbor/harbor-log:{{version}}
|
||||
container_name: harbor-log
|
||||
restart: always
|
||||
dns_search: .
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
@ -61,7 +60,6 @@ services:
|
||||
{% endif %}
|
||||
networks:
|
||||
- harbor
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
@ -105,7 +103,6 @@ services:
|
||||
{% endif %}
|
||||
networks:
|
||||
- harbor
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
@ -134,7 +131,6 @@ services:
|
||||
aliases:
|
||||
- harbor-db
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
env_file:
|
||||
- ./common/config/db/env
|
||||
depends_on:
|
||||
@ -196,7 +192,6 @@ services:
|
||||
aliases:
|
||||
- harbor-core
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
- registry
|
||||
@ -236,7 +231,6 @@ services:
|
||||
{% endif %}
|
||||
networks:
|
||||
- harbor
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
@ -275,7 +269,6 @@ services:
|
||||
{% endif %}
|
||||
networks:
|
||||
- harbor
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- core
|
||||
logging:
|
||||
@ -303,7 +296,6 @@ services:
|
||||
aliases:
|
||||
- redis
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
@ -344,7 +336,6 @@ services:
|
||||
{% if with_notary %}
|
||||
- harbor-notary
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
ports:
|
||||
- {{http_port}}:8080
|
||||
{% if protocol == 'https' %}
|
||||
@ -374,7 +365,6 @@ services:
|
||||
networks:
|
||||
- notary-sig
|
||||
- harbor-notary
|
||||
dns_search: .
|
||||
volumes:
|
||||
- ./common/config/notary:/etc/notary:z
|
||||
- type: bind
|
||||
@ -415,7 +405,6 @@ services:
|
||||
notary-sig:
|
||||
aliases:
|
||||
- notarysigner
|
||||
dns_search: .
|
||||
volumes:
|
||||
- ./common/config/notary:/etc/notary:z
|
||||
- type: bind
|
||||
@ -455,7 +444,6 @@ services:
|
||||
restart: always
|
||||
cap_drop:
|
||||
- ALL
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
{% if external_redis == False %}
|
||||
@ -503,7 +491,6 @@ services:
|
||||
- SETUID
|
||||
networks:
|
||||
- harbor-chartmuseum
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
volumes:
|
||||
@ -542,7 +529,6 @@ services:
|
||||
restart: always
|
||||
networks:
|
||||
- harbor
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- core
|
||||
{% if external_database == False %}
|
||||
|
@ -25,3 +25,25 @@ REGISTRY_CREDENTIAL_PASSWORD={{registry_password}}
|
||||
METRIC_NAMESPACE=harbor
|
||||
METRIC_SUBSYSTEM=jobservice
|
||||
{% endif %}
|
||||
|
||||
{% if trace.enabled %}
|
||||
TRACE_ENABLED=true
|
||||
TRACE_SERVICE_NAME=harbor-jobservice
|
||||
TRACE_SAMPLE_RATE={{ trace.sample_rate }}
|
||||
TRACE_NAMESPACE={{ trace.namespace }}
|
||||
TRACE_ATTRIBUTES={{ trace.attributes | to_json | safe }}
|
||||
{% if trace.jaeger.enabled %}
|
||||
TRACE_JAEGER_ENDPOINT={{ trace.jaeger.endpoint if trace.jaeger.endpoint else '' }}
|
||||
TRACE_JAEGER_USERNAME={{ trace.jaeger.username if trace.jaeger.username else '' }}
|
||||
TRACE_JAEGER_PASSWORD={{ trace.jaeger.password if trace.jaeger.password else '' }}
|
||||
TRACE_JAEGER_AGENT_HOSTNAME={{ trace.jaeger.agent_host if trace.jaeger.agent_host else '' }}
|
||||
TRACE_JAEGER_AGENT_PORT={{ trace.jaeger.agent_port if trace.jaeger.agent_port else '' }}
|
||||
{% endif %}
|
||||
{%if trace.otel.enabled %}
|
||||
TRACE_OTEL_ENDPOINT={{ trace.otel.endpoint }}
|
||||
TRACE_OTEL_URL_PATH={{ trace.otel.url_path if trace.otel.url_path else '' }}
|
||||
TRACE_OTEL_COMPRESSION={{ trace.otel.compression }}
|
||||
TRACE_OTEL_TIMEOUT={{ trace.otel.timeout }}
|
||||
TRACE_OTEL_INSECURE={{ trace.otel.insecure }}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
@ -9,3 +9,24 @@ INTERNAL_TLS_CERT_PATH=/etc/harbor/ssl/registryctl.crt
|
||||
{% if internal_tls.verify_client_cert %}
|
||||
INTERNAL_VERIFY_CLIENT_CERT=true
|
||||
{% endif %}
|
||||
{% if trace.enabled %}
|
||||
TRACE_ENABLED=true
|
||||
TRACE_SERVICE_NAME=harbor-registryctl
|
||||
TRACE_SAMPLE_RATE={{ trace.sample_rate }}
|
||||
TRACE_NAMESPACE={{ trace.namespace }}
|
||||
TRACE_ATTRIBUTES={{ trace.attributes | to_json | safe }}
|
||||
{% if trace.jaeger.enabled %}
|
||||
TRACE_JAEGER_ENDPOINT={{ trace.jaeger.endpoint if trace.jaeger.endpoint else '' }}
|
||||
TRACE_JAEGER_USERNAME={{ trace.jaeger.username if trace.jaeger.username else '' }}
|
||||
TRACE_JAEGER_PASSWORD={{ trace.jaeger.password if trace.jaeger.password else '' }}
|
||||
TRACE_JAEGER_AGENT_HOSTNAME={{ trace.jaeger.agent_host if trace.jaeger.agent_host else '' }}
|
||||
TRACE_JAEGER_AGENT_PORT={{ trace.jaeger.agent_port if trace.jaeger.agent_port else '' }}
|
||||
{% endif %}
|
||||
{%if trace.otel.enabled %}
|
||||
TRACE_OTEL_ENDPOINT={{ trace.otel.endpoint }}
|
||||
TRACE_OTEL_URL_PATH={{ trace.otel.url_path if trace.otel.url_path else '' }}
|
||||
TRACE_OTEL_COMPRESSION={{ trace.otel.compression }}
|
||||
TRACE_OTEL_TIMEOUT={{ trace.otel.timeout }}
|
||||
TRACE_OTEL_INSECURE={{ trace.otel.insecure }}
|
||||
{% endif %}
|
||||
{% endif %}
|
@ -12,6 +12,7 @@ SCANNER_TRIVY_IGNORE_UNFIXED={{trivy_ignore_unfixed}}
|
||||
SCANNER_TRIVY_SKIP_UPDATE={{trivy_skip_update}}
|
||||
SCANNER_TRIVY_GITHUB_TOKEN={{trivy_github_token}}
|
||||
SCANNER_TRIVY_INSECURE={{trivy_insecure}}
|
||||
SCANNER_TRIVY_TIMEOUT={{trivy_timeout}}
|
||||
HTTP_PROXY={{trivy_http_proxy}}
|
||||
HTTPS_PROXY={{trivy_https_proxy}}
|
||||
NO_PROXY={{trivy_no_proxy}}
|
||||
|
@ -3,7 +3,7 @@ import os
|
||||
import yaml
|
||||
from urllib.parse import urlencode
|
||||
from g import versions_file_path, host_root_dir, DEFAULT_UID, INTERNAL_NO_PROXY_DN
|
||||
from models import InternalTLS, Metric
|
||||
from models import InternalTLS, Metric, Trace
|
||||
from utils.misc import generate_random_string, owner_can_read, other_can_read
|
||||
|
||||
default_db_max_idle_conns = 2 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns
|
||||
@ -76,6 +76,8 @@ def validate(conf: dict, **kwargs):
|
||||
# TODO:
|
||||
# If user enable trust cert dir, need check if the files in this dir is readable.
|
||||
|
||||
if conf.get('trace'):
|
||||
conf['trace'].validate()
|
||||
|
||||
def parse_versions():
|
||||
if not versions_file_path.is_file():
|
||||
@ -219,6 +221,7 @@ def parse_yaml_config(config_file_path, with_notary, with_trivy, with_chartmuseu
|
||||
config_dict['trivy_skip_update'] = trivy_configs.get("skip_update") or False
|
||||
config_dict['trivy_ignore_unfixed'] = trivy_configs.get("ignore_unfixed") or False
|
||||
config_dict['trivy_insecure'] = trivy_configs.get("insecure") or False
|
||||
config_dict['trivy_timeout'] = trivy_configs.get("timeout") or '5m0s'
|
||||
|
||||
# Chart configs
|
||||
chart_configs = configs.get("chart") or {}
|
||||
@ -327,6 +330,10 @@ def parse_yaml_config(config_file_path, with_notary, with_trivy, with_chartmuseu
|
||||
else:
|
||||
config_dict['metric'] = Metric()
|
||||
|
||||
# trace configs
|
||||
trace_config = configs.get('trace')
|
||||
config_dict['trace'] = Trace(trace_config or {})
|
||||
|
||||
if config_dict['internal_tls'].enabled:
|
||||
config_dict['portal_url'] = 'https://portal:8443'
|
||||
config_dict['registry_url'] = 'https://registry:5443'
|
||||
|
@ -1,8 +1,16 @@
|
||||
import json
|
||||
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from .misc import mark_file
|
||||
|
||||
jinja_env = Environment(loader=FileSystemLoader('/'), trim_blocks=True, lstrip_blocks=True)
|
||||
|
||||
def to_json(value):
|
||||
return json.dumps(value)
|
||||
|
||||
jinja_env.filters['to_json'] = to_json
|
||||
|
||||
|
||||
def render_jinja(src, dest,mode=0o640, uid=0, gid=0, **kw):
|
||||
t = jinja_env.get_template(src)
|
||||
with open(dest, 'w') as f:
|
||||
|
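The to_json filter registered above is simply json.dumps, so values such as trace.attributes land in the env files as a JSON object. The Go side can decode that value with encoding/json, as in this sketch; the variable name comes from the templates, the rest is illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// e.g. TRACE_ATTRIBUTES={"application": "harbor"} as produced by the filter above
	attrs := map[string]string{}
	if raw := os.Getenv("TRACE_ATTRIBUTES"); raw != "" {
		if err := json.Unmarshal([]byte(raw), &attrs); err != nil {
			fmt.Fprintln(os.Stderr, "invalid TRACE_ATTRIBUTES:", err)
			return
		}
	}
	fmt.Println(attrs)
}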
@ -1,4 +1,4 @@
|
||||
import os, shutil
|
||||
import os
|
||||
|
||||
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
|
||||
from utils.misc import prepare_dir
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM golang:1.16.5
|
||||
FROM golang:1.17.2
|
||||
|
||||
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
|
||||
ENV BUILDTAGS include_oss include_gcs
|
||||
|
@ -10,4 +10,4 @@ set -e
|
||||
|
||||
/home/harbor/install_cert.sh
|
||||
|
||||
/usr/bin/registry_DO_NOT_USE_GC serve /etc/registry/config.yml
|
||||
exec /usr/bin/registry_DO_NOT_USE_GC serve /etc/registry/config.yml
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM golang:1.16.5
|
||||
FROM golang:1.17.2
|
||||
|
||||
ADD . /go/src/github.com/aquasecurity/harbor-scanner-trivy/
|
||||
WORKDIR /go/src/github.com/aquasecurity/harbor-scanner-trivy/
|
||||
|
@ -19,7 +19,7 @@ TEMP=$(mktemp -d ${TMPDIR-/tmp}/trivy-adapter.XXXXXX)
|
||||
git clone https://github.com/aquasecurity/harbor-scanner-trivy.git $TEMP
|
||||
cd $TEMP; git checkout $VERSION; cd -
|
||||
|
||||
echo "Building Trivy adapter binary based on golang:1.16.5..."
|
||||
echo "Building Trivy adapter binary based on golang:1.17.2..."
|
||||
cp Dockerfile.binary $TEMP
|
||||
docker build -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP
|
||||
|
||||
|
@ -4,4 +4,4 @@ set -e
|
||||
|
||||
/home/scanner/install_cert.sh
|
||||
|
||||
/home/scanner/bin/scanner-trivy
|
||||
exec /home/scanner/bin/scanner-trivy
|
||||
|
@ -10,8 +10,11 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
|
||||
commonhttp "github.com/goharbor/harbor/src/common/http"
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
tracelib "github.com/goharbor/harbor/src/lib/trace"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -22,7 +25,7 @@ const (
|
||||
|
||||
var (
|
||||
once sync.Once
|
||||
chartTransport *http.Transport
|
||||
chartTransport http.RoundTripper
|
||||
)
|
||||
|
||||
// ChartClient is a http client to get the content from the external http server
|
||||
@ -38,9 +41,13 @@ type ChartClient struct {
|
||||
// credential can be nil
|
||||
func NewChartClient(credential *Credential) *ChartClient { // Create http client with customized timeouts
|
||||
once.Do(func() {
|
||||
chartTransport = commonhttp.GetHTTPTransport(commonhttp.SecureTransport).Clone()
|
||||
chartTransport.MaxIdleConns = maxIdleConnections
|
||||
chartTransport.IdleConnTimeout = idleConnectionTimeout
|
||||
chartTransport = commonhttp.NewTransport(
|
||||
commonhttp.WithMaxIdleConns(maxIdleConnections),
|
||||
commonhttp.WithIdleconnectionTimeout(idleConnectionTimeout),
|
||||
)
|
||||
if tracelib.Enabled() {
|
||||
chartTransport = otelhttp.NewTransport(chartTransport)
|
||||
}
|
||||
})
|
||||
|
||||
client := &http.Client{
|
||||
|
@ -53,7 +53,7 @@ func NewProxyEngine(target *url.URL, cred *Credential, middlewares ...func(http.
|
||||
director(target, cred, req)
|
||||
},
|
||||
ModifyResponse: modifyResponse,
|
||||
Transport: commonhttp.GetHTTPTransport(commonhttp.SecureTransport),
|
||||
Transport: commonhttp.GetHTTPTransport(),
|
||||
}
|
||||
|
||||
if len(middlewares) > 0 {
|
||||
|
@ -1,11 +1,12 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/spf13/viper"
|
||||
@ -43,7 +44,7 @@ func main() {
|
||||
HarborHost: viper.GetString("service.host"),
|
||||
HarborPort: viper.GetInt("service.port"),
|
||||
Client: &http.Client{
|
||||
Transport: commonthttp.GetHTTPTransport(commonthttp.SecureTransport),
|
||||
Transport: commonthttp.GetHTTPTransport(),
|
||||
},
|
||||
})
|
||||
|
||||
|
@ -169,4 +169,31 @@ const (
|
||||
MetricEnable = "metric_enable"
|
||||
MetricPort = "metric_port"
|
||||
MetricPath = "metric_path"
|
||||
|
||||
// Trace setting items
|
||||
TraceEnabled = "trace_enabled"
|
||||
TraceServiceName = "trace_service_name"
|
||||
TraceSampleRate = "trace_sample_rate"
|
||||
TraceNamespace = "trace_namespace"
|
||||
TraceAttributes = "trace_attribute"
|
||||
TraceJaegerEndpoint = "trace_jaeger_endpoint"
|
||||
TraceJaegerUsername = "trace_jaeger_username"
|
||||
TraceJaegerPassword = "trace_jaeger_password"
|
||||
TraceJaegerAgentHost = "trace_jaeger_agent_host"
|
||||
TraceJaegerAgentPort = "trace_jaeger_agent_port"
|
||||
TraceOtelEndpoint = "trace_otel_endpoint"
|
||||
TraceOtelURLPath = "trace_otel_url_path"
|
||||
TraceOtelCompression = "trace_otel_compression"
|
||||
TraceOtelInsecure = "trace_otel_insecure"
|
||||
TraceOtelTimeout = "trace_otel_timeout"
|
||||
|
||||
// These variables are a temporary solution for issue: https://github.com/goharbor/harbor/issues/16039
|
||||
// When the user disables the pull count/time/audit log updates, database access decreases, especially in high-concurrency pull scenarios.
|
||||
// TODO: Once we have a complete solution, delete these variables.
|
||||
// PullCountUpdateDisable indicates whether the pull count update is disabled for pull requests.
|
||||
PullCountUpdateDisable = "pull_count_update_disable"
|
||||
// PullTimeUpdateDisable indicates whether the pull time update is disabled for pull requests.
|
||||
PullTimeUpdateDisable = "pull_time_update_disable"
|
||||
// PullAuditLogDisable indicates whether the pull audit log is disabled for pull requests.
|
||||
PullAuditLogDisable = "pull_audit_log_disable"
|
||||
)
|
||||
|
@ -18,6 +18,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
proModels "github.com/goharbor/harbor/src/pkg/project/models"
|
||||
userModels "github.com/goharbor/harbor/src/pkg/user/models"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
@ -111,7 +112,7 @@ func ClearTable(table string) error {
|
||||
if table == proModels.ProjectTable {
|
||||
sql = fmt.Sprintf("delete from %s where project_id > 1", table)
|
||||
}
|
||||
if table == models.UserTable {
|
||||
if table == userModels.UserTable {
|
||||
sql = fmt.Sprintf("delete from %s where user_id > 2", table)
|
||||
}
|
||||
if table == "project_member" { // make sure admin in library
|
||||
|
@ -16,12 +16,14 @@ package dao
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
|
||||
"github.com/astaxie/beego/orm"
|
||||
"github.com/goharbor/harbor/src/common/utils"
|
||||
"github.com/goharbor/harbor/src/lib/log"
|
||||
@ -74,7 +76,7 @@ func NewPGSQL(host string, port string, usr string, pwd string, database string,
|
||||
|
||||
// Register registers pgSQL to orm with the info wrapped by the instance.
|
||||
func (p *pgsql) Register(alias ...string) error {
|
||||
if err := utils.TestTCPConn(fmt.Sprintf("%s:%s", p.host, p.port), 60, 2); err != nil {
|
||||
if err := utils.TestTCPConn(net.JoinHostPort(p.host, p.port), 60, 2); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -105,13 +107,13 @@ func (p *pgsql) Register(alias ...string) error {
|
||||
|
||||
// UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts.
|
||||
func (p *pgsql) UpgradeSchema() error {
|
||||
port, err := strconv.ParseInt(p.port, 10, 64)
|
||||
port, err := strconv.Atoi(p.port)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m, err := NewMigrator(&models.PostGreSQL{
|
||||
Host: p.host,
|
||||
Port: int(port),
|
||||
Port: port,
|
||||
Username: p.usr,
|
||||
Password: p.pwd,
|
||||
Database: p.database,
|
||||
@ -142,7 +144,7 @@ func NewMigrator(database *models.PostGreSQL) (*migrate.Migrate, error) {
|
||||
dbURL := url.URL{
|
||||
Scheme: "postgres",
|
||||
User: url.UserPassword(database.Username, database.Password),
|
||||
Host: fmt.Sprintf("%s:%d", database.Host, database.Port),
|
||||
Host: net.JoinHostPort(database.Host, strconv.Itoa(database.Port)),
|
||||
Path: database.Database,
|
||||
RawQuery: fmt.Sprintf("sslmode=%s", database.SSLMode),
|
||||
}
|
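Two small correctness points from the hunks above, shown in isolation: net.JoinHostPort adds the brackets an IPv6 literal needs, which the Sprintf form does not, and strconv.Atoi yields an int directly so no int64 narrowing is required.

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	old := fmt.Sprintf("%s:%s", "::1", "5432")
	fmt.Println(old)                             // ::1:5432 — ambiguous for IPv6
	fmt.Println(net.JoinHostPort("::1", "5432")) // [::1]:5432 — always a valid address

	port, err := strconv.Atoi("5432") // already an int, no int64(...) conversion needed
	if err != nil {
		panic(err)
	}
	fmt.Println(port)
}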
||||
|
@ -135,12 +135,6 @@ func ExecuteBatchSQL(sqls []string) {
|
||||
}
|
||||
}
|
||||
|
||||
// CleanUser - Clean this user information from DB, this is a shortcut for UT.
|
||||
func CleanUser(id int64) error {
|
||||
_, err := GetOrmer().QueryTable(&models.User{}).Filter("UserID", id).Delete()
|
||||
return err
|
||||
}
|
||||
|
||||
// ArrayEqual ...
|
||||
func ArrayEqual(arrayA, arrayB []int) bool {
|
||||
if len(arrayA) != len(arrayB) {
|
||||
|
@ -16,65 +16,18 @@ package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/common/http/modifier"
|
||||
"github.com/goharbor/harbor/src/lib"
|
||||
)
|
||||
|
||||
const (
|
||||
// InsecureTransport used to get the insecure http Transport
|
||||
InsecureTransport = iota
|
||||
// SecureTransport used to get the external secure http Transport
|
||||
SecureTransport
|
||||
)
|
||||
|
||||
var (
|
||||
secureHTTPTransport *http.Transport
|
||||
insecureHTTPTransport *http.Transport
|
||||
)
|
||||
|
||||
func init() {
|
||||
secureHTTPTransport = newDefaultTransport()
|
||||
insecureHTTPTransport = newDefaultTransport()
|
||||
insecureHTTPTransport.TLSClientConfig.InsecureSkipVerify = true
|
||||
|
||||
if InternalTLSEnabled() {
|
||||
tlsConfig, err := GetInternalTLSConfig()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
secureHTTPTransport.TLSClientConfig = tlsConfig
|
||||
}
|
||||
}
|
||||
|
||||
// Use this instead of Default Transport in library because it sets ForceAttemptHTTP2 to true
|
||||
// And that options introduced in go 1.13 will cause the https requests hang forever in replication environment
|
||||
func newDefaultTransport() *http.Transport {
|
||||
return &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).DialContext,
|
||||
TLSClientConfig: &tls.Config{},
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// Client is a util for common HTTP operations, such Get, Head, Post, Put and Delete.
|
||||
// Use Do instead if those methods can not meet your requirement
|
||||
type Client struct {
|
||||
@ -87,27 +40,6 @@ func (c *Client) GetClient() *http.Client {
|
||||
return c.client
|
||||
}
|
||||
|
||||
// GetHTTPTransport returns HttpTransport based on insecure configuration
|
||||
func GetHTTPTransport(clientType uint) *http.Transport {
|
||||
switch clientType {
|
||||
case SecureTransport:
|
||||
return secureHTTPTransport
|
||||
case InsecureTransport:
|
||||
return insecureHTTPTransport
|
||||
default:
|
||||
// default Transport is secure one
|
||||
return secureHTTPTransport
|
||||
}
|
||||
}
|
||||
|
||||
// GetHTTPTransportByInsecure returns a insecure HttpTransport if insecure is true or it returns secure one
|
||||
func GetHTTPTransportByInsecure(insecure bool) *http.Transport {
|
||||
if insecure {
|
||||
return insecureHTTPTransport
|
||||
}
|
||||
return secureHTTPTransport
|
||||
}
|
||||
|
||||
// NewClient creates an instance of Client.
|
||||
// Use net/http.Client as the default value if c is nil.
|
||||
// Modifiers modify the request before sending it.
|
||||
@ -117,7 +49,7 @@ func NewClient(c *http.Client, modifiers ...modifier.Modifier) *Client {
|
||||
}
|
||||
if client.client == nil {
|
||||
client.client = &http.Client{
|
||||
Transport: GetHTTPTransport(SecureTransport),
|
||||
Transport: GetHTTPTransport(),
|
||||
}
|
||||
}
|
||||
if len(modifiers) > 0 {
|
||||
@ -294,6 +226,14 @@ func (c *Client) GetAndIteratePagination(endpoint string, v interface{}) error {
|
||||
for _, link := range links {
|
||||
if link.Rel == "next" {
|
||||
endpoint = url.Scheme + "://" + url.Host + link.URL
|
||||
url, err = url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// encode the query parameters to avoid bad request
|
||||
// e.g. ?q=name={p1 p2 p3} need to be encoded to ?q=name%3D%7Bp1+p2+p3%7D
|
||||
url.RawQuery = url.Query().Encode()
|
||||
endpoint = url.String()
|
||||
break
|
||||
}
|
||||
}
|
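The pagination change above re-parses the "next" link and re-encodes its query so characters like spaces and braces are percent-encoded before the URL is reused. The sketch below reproduces only that encoding step, with a hypothetical endpoint.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// hypothetical "next" link with characters that need encoding
	u, err := url.Parse("https://harbor.example.com/api/v2.0/projects?q=name={p1 p2 p3}")
	if err != nil {
		panic(err)
	}
	u.RawQuery = u.Query().Encode() // q=name%3D%7Bp1+p2+p3%7D
	fmt.Println(u.String())
}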
||||
|
@ -1,14 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetHTTPTransport(t *testing.T) {
|
||||
transport := GetHTTPTransport(InsecureTransport)
|
||||
assert.True(t, transport.TLSClientConfig.InsecureSkipVerify)
|
||||
transport = GetHTTPTransport(SecureTransport)
|
||||
assert.False(t, transport.TLSClientConfig.InsecureSkipVerify)
|
||||
}
|
137
src/common/http/transport.go
Normal file
@ -0,0 +1,137 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"github.com/goharbor/harbor/src/lib/trace"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
)
|
||||
|
||||
const (
|
||||
// InsecureTransport used to get the insecure http Transport
|
||||
InsecureTransport = iota
|
||||
// SecureTransport used to get the external secure http Transport
|
||||
SecureTransport
|
||||
)
|
||||
|
||||
var (
|
||||
secureHTTPTransport http.RoundTripper
|
||||
insecureHTTPTransport http.RoundTripper
|
||||
)
|
||||
|
||||
func init() {
|
||||
insecureHTTPTransport = NewTransport(WithInsecureSkipVerify(true))
|
||||
if InternalTLSEnabled() {
|
||||
secureHTTPTransport = NewTransport(WithInternalTLSConfig())
|
||||
} else {
|
||||
secureHTTPTransport = NewTransport()
|
||||
}
|
||||
}
|
||||
|
||||
func AddTracingWithGlobalTransport() {
|
||||
insecureHTTPTransport = otelhttp.NewTransport(insecureHTTPTransport, trace.HarborHTTPTraceOptions...)
|
||||
secureHTTPTransport = otelhttp.NewTransport(secureHTTPTransport, trace.HarborHTTPTraceOptions...)
|
||||
}
|
||||
|
||||
// Use this instead of the library's DefaultTransport because DefaultTransport sets ForceAttemptHTTP2 to true,
|
||||
// and that option, introduced in Go 1.13, can cause https requests to hang forever in a replication environment
|
||||
func newDefaultTransport() *http.Transport {
|
||||
return &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).DialContext,
|
||||
TLSClientConfig: &tls.Config{},
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// WithInternalTLSConfig returns a TransportOption that configures the transport to use the internal TLS configuration
|
||||
func WithInternalTLSConfig() func(*http.Transport) {
|
||||
return func(tr *http.Transport) {
|
||||
tlsConfig, err := GetInternalTLSConfig()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
tr.TLSClientConfig = tlsConfig
|
||||
}
|
||||
}
|
||||
|
||||
// WithInsecureSkipVerify returns a TransportOption that configures the transport to skip verification of the server's certificate
|
||||
func WithInsecureSkipVerify(skipVerify bool) func(*http.Transport) {
|
||||
return func(tr *http.Transport) {
|
||||
tr.TLSClientConfig.InsecureSkipVerify = skipVerify
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxIdleConns returns a TransportOption that configures the transport to use the specified maximum number of idle connections
|
||||
func WithMaxIdleConns(maxIdleConns int) func(*http.Transport) {
|
||||
return func(tr *http.Transport) {
|
||||
tr.MaxIdleConns = maxIdleConns
|
||||
}
|
||||
}
|
||||
|
||||
// WithIdleconnectionTimeout returns a TransportOption that configures the transport to use the specified idle connection timeout
|
||||
func WithIdleconnectionTimeout(idleConnectionTimeout time.Duration) func(*http.Transport) {
|
||||
return func(tr *http.Transport) {
|
||||
tr.IdleConnTimeout = idleConnectionTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// NewTransport returns a new http.Transport with the specified options
|
||||
func NewTransport(opts ...func(*http.Transport)) http.RoundTripper {
|
||||
tr := newDefaultTransport()
|
||||
for _, opt := range opts {
|
||||
opt(tr)
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
||||
// TransportConfig is the configuration for http transport
|
||||
type TransportConfig struct {
|
||||
Insecure bool
|
||||
}
|
||||
|
||||
// TransportOption is the option for http transport
|
||||
type TransportOption func(*TransportConfig)
|
||||
|
||||
// WithInsecure returns a TransportOption that configures the transport to skip verification of the server's certificate
|
||||
func WithInsecure(skipVerify bool) TransportOption {
|
||||
return func(cfg *TransportConfig) {
|
||||
cfg.Insecure = skipVerify
|
||||
}
|
||||
}
|
||||
|
||||
// GetHTTPTransport returns a shared http.RoundTripper based on the insecure configuration
|
||||
func GetHTTPTransport(opts ...TransportOption) http.RoundTripper {
|
||||
cfg := &TransportConfig{}
|
||||
for _, opt := range opts {
|
||||
opt(cfg)
|
||||
}
|
||||
if cfg.Insecure {
|
||||
return insecureHTTPTransport
|
||||
}
|
||||
return secureHTTPTransport
|
||||
}
|
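A usage sketch for the functional-options API defined in this new file. The constructor and option names come from the diff itself; the surrounding wiring is illustrative and assumes a package that imports src/common/http as commonhttp.

package example

import (
	"net/http"
	"time"

	commonhttp "github.com/goharbor/harbor/src/common/http"
)

func newClients() (tuned, secure, insecure *http.Client) {
	// A dedicated transport with tuned connection pooling
	// (see WithMaxIdleConns / WithIdleconnectionTimeout above).
	tr := commonhttp.NewTransport(
		commonhttp.WithMaxIdleConns(50),
		commonhttp.WithIdleconnectionTimeout(60*time.Second),
	)
	tuned = &http.Client{Transport: tr}

	// The shared package-level transports, selected only by the insecure flag.
	secure = &http.Client{Transport: commonhttp.GetHTTPTransport()}
	insecure = &http.Client{Transport: commonhttp.GetHTTPTransport(commonhttp.WithInsecure(true))}
	return tuned, secure, insecure
}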
14
src/common/http/transport_test.go
Normal file
@ -0,0 +1,14 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetHTTPTransport(t *testing.T) {
|
||||
transport := GetHTTPTransport()
|
||||
assert.Equal(t, secureHTTPTransport, transport, "Transport should be secure")
|
||||
transport = GetHTTPTransport(WithInsecure(true))
|
||||
assert.Equal(t, insecureHTTPTransport, transport, "Transport should be insecure")
|
||||
}
|
@ -5,12 +5,13 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/goharbor/harbor/src/lib/config"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/goharbor/harbor/src/lib/config"
|
||||
|
||||
commonhttp "github.com/goharbor/harbor/src/common/http"
|
||||
"github.com/goharbor/harbor/src/common/http/modifier/auth"
|
||||
"github.com/goharbor/harbor/src/common/job/models"
|
||||
@ -61,7 +62,7 @@ type DefaultClient struct {
|
||||
func NewDefaultClient(endpoint, secret string) *DefaultClient {
|
||||
var c *commonhttp.Client
|
||||
httpCli := &http.Client{
|
||||
Transport: commonhttp.GetHTTPTransport(commonhttp.SecureTransport),
|
||||
Transport: commonhttp.GetHTTPTransport(),
|
||||
}
|
||||
if len(secret) > 0 {
|
||||
c = commonhttp.NewClient(httpCli, auth.NewSecretAuthorizer(secret))
|
||||
@ -81,12 +82,12 @@ func NewReplicationClient(endpoint, secret string) *DefaultClient {
|
||||
|
||||
if len(secret) > 0 {
|
||||
c = commonhttp.NewClient(&http.Client{
|
||||
Transport: commonhttp.GetHTTPTransport(commonhttp.SecureTransport),
|
||||
Transport: commonhttp.GetHTTPTransport(),
|
||||
},
|
||||
auth.NewSecretAuthorizer(secret))
|
||||
} else {
|
||||
c = commonhttp.NewClient(&http.Client{
|
||||
Transport: commonhttp.GetHTTPTransport(commonhttp.SecureTransport),
|
||||
Transport: commonhttp.GetHTTPTransport(),
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -20,7 +20,6 @@ import (
|
||||
|
||||
func init() {
|
||||
orm.RegisterModel(
|
||||
new(User),
|
||||
new(Role),
|
||||
new(ResourceLabel),
|
||||
new(OIDCUser),
|
||||
|
@ -15,58 +15,39 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/astaxie/beego/orm"
|
||||
)
|
||||
|
||||
// UserTable is the name of table in DB that holds the user object
|
||||
const UserTable = "harbor_user"
|
||||
|
||||
// User holds the details of a user.
|
||||
type User struct {
|
||||
UserID int `orm:"pk;auto;column(user_id)" json:"user_id"`
|
||||
Username string `orm:"column(username)" json:"username" sort:"default"`
|
||||
Email string `orm:"column(email)" json:"email"`
|
||||
Password string `orm:"column(password)" json:"password"`
|
||||
PasswordVersion string `orm:"column(password_version)" json:"password_version"`
|
||||
Realname string `orm:"column(realname)" json:"realname"`
|
||||
Comment string `orm:"column(comment)" json:"comment"`
|
||||
Deleted bool `orm:"column(deleted)" json:"deleted"`
|
||||
Rolename string `orm:"-" json:"role_name"`
|
||||
// if this field is named as "RoleID", beego orm can not map role_id
|
||||
// to it.
|
||||
Role int `orm:"-" json:"role_id"`
|
||||
SysAdminFlag bool `orm:"column(sysadmin_flag)" json:"sysadmin_flag"`
|
||||
UserID int `json:"user_id"`
|
||||
Username string `json:"username" sort:"default"`
|
||||
Email string `json:"email"`
|
||||
Password string `json:"password"`
|
||||
PasswordVersion string `json:"password_version"`
|
||||
Realname string `json:"realname"`
|
||||
Comment string `json:"comment"`
|
||||
Deleted bool `json:"deleted"`
|
||||
Rolename string `json:"role_name"`
|
||||
Role int `json:"role_id"`
|
||||
SysAdminFlag bool `json:"sysadmin_flag"`
|
||||
// AdminRoleInAuth to store the admin privilege granted by external authentication provider
|
||||
AdminRoleInAuth bool `orm:"-" json:"admin_role_in_auth"`
|
||||
ResetUUID string `orm:"column(reset_uuid)" json:"reset_uuid"`
|
||||
Salt string `orm:"column(salt)" json:"-"`
|
||||
CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
|
||||
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
|
||||
GroupIDs []int `orm:"-" json:"-"`
|
||||
OIDCUserMeta *OIDCUser `orm:"-" json:"oidc_user_meta,omitempty"`
|
||||
AdminRoleInAuth bool `json:"admin_role_in_auth"`
|
||||
ResetUUID string `json:"reset_uuid"`
|
||||
Salt string `json:"-"`
|
||||
CreationTime time.Time `json:"creation_time"`
|
||||
UpdateTime time.Time `json:"update_time"`
|
||||
GroupIDs []int `json:"-"`
|
||||
OIDCUserMeta *OIDCUser `json:"oidc_user_meta,omitempty"`
|
||||
}
|
||||
|
||||
// TableName ...
|
||||
func (u *User) TableName() string {
|
||||
return UserTable
|
||||
}
|
||||
type Users []*User
|
||||
|
||||
// FilterByUsernameOrEmail generates the query setter to match username or email column to the same value
|
||||
func (u *User) FilterByUsernameOrEmail(ctx context.Context, qs orm.QuerySeter, key string, value interface{}) orm.QuerySeter {
|
||||
usernameOrEmail, ok := value.(string)
|
||||
if !ok {
|
||||
return qs
|
||||
// MapByUserID returns map which key is UserID of the user and value is the user itself
|
||||
func (users Users) MapByUserID() map[int]*User {
|
||||
m := map[int]*User{}
|
||||
for _, user := range users {
|
||||
m[user.UserID] = user
|
||||
}
|
||||
subCond := orm.NewCondition()
|
||||
subCond = subCond.Or("Username", usernameOrEmail).Or("Email", usernameOrEmail)
|
||||
|
||||
conds := qs.GetCond()
|
||||
if conds == nil {
|
||||
conds = orm.NewCondition()
|
||||
}
|
||||
qs = qs.SetCond(conds.AndCond(subCond))
|
||||
return qs
|
||||
return m
|
||||
}
|
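A small usage sketch for the MapByUserID helper added above; the field names come from the User struct in this hunk, and the function itself is illustrative (e.g. something you might write in a test next to the model).

package models

import "fmt"

// illustrative only — not part of the change above
func exampleMapByUserID() {
	users := Users{
		{UserID: 1, Username: "admin"},
		{UserID: 7, Username: "alice"},
	}
	byID := users.MapByUserID()
	if u, ok := byID[7]; ok {
		fmt.Println(u.Username) // alice
	}
}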
||||
|
@ -30,6 +30,7 @@ const (
|
||||
|
||||
ActionOperate = Action("operate")
|
||||
ActionScannerPull = Action("scanner-pull") // for robot account created by scanner to pull image, bypass the policy check
|
||||
ActionStop = Action("stop") // for stop scan/scan-all execution
|
||||
)
|
||||
|
||||
// const resource variables
|
||||
|
@ -98,6 +98,7 @@ var (
|
||||
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionStop},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionCreate},
|
||||
@ -185,6 +186,7 @@ var (
|
||||
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionStop},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
|
||||
|
@ -58,6 +58,7 @@ var (
|
||||
{Resource: rbac.ResourceScanAll, Action: rbac.ActionUpdate},
|
||||
{Resource: rbac.ResourceScanAll, Action: rbac.ActionDelete},
|
||||
{Resource: rbac.ResourceScanAll, Action: rbac.ActionList},
|
||||
{Resource: rbac.ResourceScanAll, Action: rbac.ActionStop},
|
||||
|
||||
{Resource: rbac.ResourceSystemVolumes, Action: rbac.ActionRead},
|
||||
|
||||
|
@ -59,7 +59,14 @@ func ReversibleEncrypt(str, key string) (string, error) {
|
||||
if block, err = aes.NewCipher(keyBytes); err != nil {
|
||||
return "", err
|
||||
}
|
||||
cipherText := make([]byte, aes.BlockSize+len(str))
|
||||
|
||||
// ensures the value is no larger than 64 MB, which fits comfortably within an int and avoids the overflow
|
||||
if len(str) > 64*1024*1024 {
|
||||
return "", errors.New("str value too large")
|
||||
}
|
||||
|
||||
size := aes.BlockSize + len(str)
|
||||
cipherText := make([]byte, size)
|
||||
iv := cipherText[:aes.BlockSize]
|
||||
if _, err = io.ReadFull(rand.Reader, iv); err != nil {
|
||||
return "", err
|
||||
|
@ -163,12 +163,9 @@ func ParseProjectIDOrName(value interface{}) (int64, string, error) {
|
||||
|
||||
var id int64
|
||||
var name string
|
||||
switch value.(type) {
|
||||
case int:
|
||||
i := value.(int)
|
||||
id = int64(i)
|
||||
case int64:
|
||||
id = value.(int64)
|
||||
switch v := value.(type) {
|
||||
case int, int64:
|
||||
id = reflect.ValueOf(v).Int()
|
||||
case string:
|
||||
name = value.(string)
|
||||
default:
|
||||
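The rewritten switch above uses one case for both int and int64; inside a multi-type case the variable keeps its interface type, so reflect.ValueOf(v).Int() is what normalizes either width to int64. A standalone sketch of that behavior (names here are illustrative, not Harbor's):

package main

import (
	"fmt"
	"reflect"
)

func toID(value interface{}) int64 {
	switch v := value.(type) {
	case int, int64:
		// v is still interface{} here because the case lists two types,
		// so a direct conversion is not possible; reflect handles both widths.
		return reflect.ValueOf(v).Int()
	default:
		return 0
	}
}

func main() {
	fmt.Println(toID(7), toID(int64(9))) // 7 9
}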
|
@ -204,7 +204,7 @@ func (c *controller) ensureArtifact(ctx context.Context, repository, digest stri
|
||||
created = true
|
||||
artifact.ID = id
|
||||
return nil
|
||||
})(ctx); err != nil {
|
||||
})(orm.SetTransactionOpNameToContext(ctx, "tx-ensure-artifact")); err != nil {
|
||||
// got error that isn't conflict error, return directly
|
||||
if !errors.IsConflictErr(err) {
|
||||
return false, nil, err
|
||||
@ -376,7 +376,7 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot bool) er
|
||||
Digest: art.Digest,
|
||||
})
|
||||
return err
|
||||
})(ctx); err != nil && !errors.IsErr(err, errors.ConflictCode) {
|
||||
})(orm.SetTransactionOpNameToContext(ctx, "tx-delete-artifact-deeply")); err != nil && !errors.IsErr(err, errors.ConflictCode) {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@@ -17,9 +17,10 @@ package blob
 import (
 	"context"
 	"fmt"
-	"github.com/goharbor/harbor/src/lib/q"
 	"time"

+	"github.com/goharbor/harbor/src/lib/q"
+
 	"github.com/docker/distribution"
 	"github.com/goharbor/harbor/src/lib/errors"
 	"github.com/goharbor/harbor/src/lib/log"
@@ -303,7 +304,7 @@ func (c *controller) Sync(ctx context.Context, references []distribution.Descrip
 		}

 		return nil
-	})(ctx)
+	})(orm.SetTransactionOpNameToContext(ctx, "tx-sync-blob"))
 }

 	if len(missing) > 0 {
@@ -17,6 +17,7 @@ package auditlog
 import (
 	"context"
 	"github.com/goharbor/harbor/src/controller/event"
+	"github.com/goharbor/harbor/src/lib/config"
 	"github.com/goharbor/harbor/src/lib/log"
 	"github.com/goharbor/harbor/src/pkg/audit"
 	am "github.com/goharbor/harbor/src/pkg/audit/model"
@@ -39,10 +40,19 @@ func (h *Handler) Name() string {
 // Handle ...
 func (h *Handler) Handle(ctx context.Context, value interface{}) error {
 	var auditLog *am.AuditLog
+	var addAuditLog bool
 	switch v := value.(type) {
-	case *event.PushArtifactEvent, *event.PullArtifactEvent, *event.DeleteArtifactEvent,
+	case *event.PushArtifactEvent, *event.DeleteArtifactEvent,
 		*event.DeleteRepositoryEvent, *event.CreateProjectEvent, *event.DeleteProjectEvent,
 		*event.DeleteTagEvent, *event.CreateTagEvent:
+		addAuditLog = true
+	case *event.PullArtifactEvent:
+		addAuditLog = !config.PullAuditLogDisable(ctx)
+	default:
+		log.Errorf("Can not handler this event type! %#v", v)
+	}
+
+	if addAuditLog {
 		resolver := value.(AuditResolver)
 		al, err := resolver.ResolveToAuditLog()
 		if err != nil {
@@ -50,13 +60,11 @@ func (h *Handler) Handle(ctx context.Context, value interface{}) error {
 			return err
 		}
 		auditLog = al
-	default:
-		log.Errorf("Can not handler this event type! %#v", v)
-	}
-	if auditLog != nil {
-		_, err := audit.Mgr.Create(ctx, auditLog)
-		if err != nil {
-			log.Debugf("add audit log err: %v", err)
+		if auditLog != nil {
+			_, err := audit.Mgr.Create(ctx, auditLog)
+			if err != nil {
+				log.Debugf("add audit log err: %v", err)
+			}
 		}
 	}
 	return nil
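A self-contained sketch of the same idea, gating one high-volume event type behind a configuration flag while the others are always logged; the event types and the pullAuditDisabled flag below are made up for illustration and stand in for Harbor's events and the PullAuditLogDisable helper referenced in the diff:

package main

import "fmt"

type PushEvent struct{ Repo string }
type PullEvent struct{ Repo string }

// pullAuditDisabled stands in for a configuration lookup.
var pullAuditDisabled = true

func shouldAudit(evt interface{}) bool {
	switch evt.(type) {
	case PushEvent:
		return true
	case PullEvent:
		// pull events are high-volume, so they can be switched off
		return !pullAuditDisabled
	default:
		return false
	}
}

func main() {
	fmt.Println(shouldAudit(PushEvent{"library/nginx"})) // true
	fmt.Println(shouldAudit(PullEvent{"library/nginx"})) // false when disabled
}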
@@ -30,6 +30,7 @@ func init() {
 	notifier.Subscribe(event.TopicQuotaExceed, &quota.Handler{})
 	notifier.Subscribe(event.TopicQuotaWarning, &quota.Handler{})
 	notifier.Subscribe(event.TopicScanningFailed, &scan.Handler{})
+	notifier.Subscribe(event.TopicScanningStopped, &scan.Handler{})
 	notifier.Subscribe(event.TopicScanningCompleted, &scan.Handler{})
 	notifier.Subscribe(event.TopicDeleteArtifact, &scan.DelArtHandler{})
 	notifier.Subscribe(event.TopicReplication, &artifact.ReplicationHandler{})
@@ -16,6 +16,7 @@ package internal

 import (
 	"context"
+	"github.com/goharbor/harbor/src/lib/config"
 	"time"

 	"github.com/goharbor/harbor/src/controller/artifact"
@@ -54,8 +55,12 @@ func (a *Handler) IsStateful() bool {
 }

 func (a *Handler) onPull(ctx context.Context, event *event.ArtifactEvent) error {
-	go func() { a.updatePullTime(ctx, event) }()
-	go func() { a.addPullCount(ctx, event) }()
+	if !config.PullTimeUpdateDisable(ctx) {
+		go func() { a.updatePullTime(ctx, event) }()
+	}
+	if !config.PullCountUpdateDisable(ctx) {
+		go func() { a.addPullCount(ctx, event) }()
+	}
 	return nil
 }
@@ -41,5 +41,5 @@ func autoScan(ctx context.Context, a *artifact.Artifact, tags ...string) error {
 		}

 		return scan.DefaultController.Scan(ctx, a, options...)
-	})(ctx)
+	})(orm.SetTransactionOpNameToContext(ctx, "tx-auto-scan"))
 }
@@ -29,7 +29,10 @@ func (si *ScanImageMetaData) Resolve(evt *event.Event) error {
 	case job.SuccessStatus:
 		eventType = event2.TopicScanningCompleted
 		topic = event2.TopicScanningCompleted
-	case job.ErrorStatus, job.StoppedStatus:
+	case job.StoppedStatus:
+		eventType = event2.TopicScanningStopped
+		topic = event2.TopicScanningStopped
+	case job.ErrorStatus:
 		eventType = event2.TopicScanningFailed
 		topic = event2.TopicScanningFailed
 	default:
@@ -49,6 +49,48 @@ func (r *scanEventTestSuite) TestResolveOfScanImageEventMetadata() {
 	r.Equal("library/hello-world", data.Artifact.Repository)
 }

+func (r *scanEventTestSuite) TestResolveOfStopScanImageEventMetadata() {
+	e := &event.Event{}
+	metadata := &ScanImageMetaData{
+		Artifact: &v1.Artifact{
+			NamespaceID: 0,
+			Repository:  "library/hello-world",
+			Tag:         "latest",
+			Digest:      "sha256:absdfd87123",
+			MimeType:    "docker.chart",
+		},
+		Status: job.StoppedStatus.String(),
+	}
+	err := metadata.Resolve(e)
+	r.Require().Nil(err)
+	r.Equal(event2.TopicScanningStopped, e.Topic)
+	r.Require().NotNil(e.Data)
+	data, ok := e.Data.(*event2.ScanImageEvent)
+	r.Require().True(ok)
+	r.Equal("library/hello-world", data.Artifact.Repository)
+}
+
+func (r *scanEventTestSuite) TestResolveOfFailedScanImageEventMetadata() {
+	e := &event.Event{}
+	metadata := &ScanImageMetaData{
+		Artifact: &v1.Artifact{
+			NamespaceID: 0,
+			Repository:  "library/hello-world",
+			Tag:         "latest",
+			Digest:      "sha256:absdfd87123",
+			MimeType:    "docker.chart",
+		},
+		Status: job.ErrorStatus.String(),
+	}
+	err := metadata.Resolve(e)
+	r.Require().Nil(err)
+	r.Equal(event2.TopicScanningFailed, e.Topic)
+	r.Require().NotNil(e.Data)
+	data, ok := e.Data.(*event2.ScanImageEvent)
+	r.Require().True(ok)
+	r.Equal("library/hello-world", data.Artifact.Repository)
+}
+
 func TestScanEventTestSuite(t *testing.T) {
 	suite.Run(t, &scanEventTestSuite{})
 }
@@ -38,6 +38,7 @@ const (
 	TopicCreateTag         = "CREATE_TAG"
 	TopicDeleteTag         = "DELETE_TAG"
 	TopicScanningFailed    = "SCANNING_FAILED"
+	TopicScanningStopped   = "SCANNING_STOPPED"
 	TopicScanningCompleted = "SCANNING_COMPLETED"
 	// QuotaExceedTopic is topic for quota warning event, the usage reaches the warning bar of limitation, like 85%
 	TopicQuotaWarning = "QUOTA_WARNING"
@@ -1,13 +1,27 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package gc

 import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/goharbor/harbor/src/lib/config"

 	"github.com/goharbor/harbor/src/controller/quota"
 	"github.com/goharbor/harbor/src/jobservice/job"
+	"github.com/goharbor/harbor/src/lib/config"
 	"github.com/goharbor/harbor/src/lib/log"
 	"github.com/goharbor/harbor/src/lib/orm"
 	"github.com/goharbor/harbor/src/pkg/scheduler"
@@ -30,7 +44,7 @@ func gcCallback(ctx context.Context, p string) error {
 	if err := json.Unmarshal([]byte(p), param); err != nil {
 		return fmt.Errorf("failed to unmarshal the param: %v", err)
 	}
-	_, err := Ctl.Start(orm.Context(), *param, task.ExecutionTriggerSchedule)
+	_, err := Ctl.Start(ctx, *param, task.ExecutionTriggerSchedule)
 	return err
 }
@@ -48,7 +48,7 @@ func HTTPStatusCodeHealthChecker(method string, url string, header http.Header,
 	}

 	client := httputil.NewClient(&http.Client{
-		Transport: httputil.GetHTTPTransport(httputil.SecureTransport),
+		Transport: httputil.GetHTTPTransport(),
		Timeout:   timeout,
	})
	resp, err := client.Do(req)
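A minimal standalone sketch of the same status-code health check using only the standard library; the endpoint URL, expected code, and timeout below are illustrative values, not Harbor's configuration:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// healthy issues the request with a bounded timeout and reports whether the
// endpoint answered with the expected HTTP status code.
func healthy(method, url string, expect int, timeout time.Duration) error {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return err
	}
	client := &http.Client{Timeout: timeout}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != expect {
		return fmt.Errorf("expected status %d, got %d", expect, resp.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(healthy(http.MethodGet, "http://localhost:8080/api/v2.0/ping", http.StatusOK, 5*time.Second))
}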
@ -17,6 +17,7 @@ package member
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
"github.com/goharbor/harbor/src/core/auth"
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
@ -26,7 +27,6 @@ import (
|
||||
"github.com/goharbor/harbor/src/pkg/project"
|
||||
"github.com/goharbor/harbor/src/pkg/user"
|
||||
"github.com/goharbor/harbor/src/pkg/usergroup"
|
||||
ugModel "github.com/goharbor/harbor/src/pkg/usergroup/model"
|
||||
)
|
||||
|
||||
// Controller defines the operation related to project member
|
||||
@ -143,7 +143,7 @@ func (c *controller) Create(ctx context.Context, projectNameOrID interface{}, re
|
||||
if u != nil {
|
||||
userID = u.UserID
|
||||
} else {
|
||||
userID, err = auth.SearchAndOnBoardUser(req.MemberUser.Username)
|
||||
userID, err = auth.SearchAndOnBoardUser(ctx, req.MemberUser.Username)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -151,19 +151,31 @@ func (c *controller) Create(ctx context.Context, projectNameOrID interface{}, re
|
||||
member.EntityID = userID
|
||||
} else if len(req.MemberGroup.LdapGroupDN) > 0 {
|
||||
req.MemberGroup.GroupType = common.LDAPGroupType
|
||||
// If groupname provided, use the provided groupname to name this group
|
||||
groupID, err := auth.SearchAndOnBoardGroup(req.MemberGroup.LdapGroupDN, req.MemberGroup.GroupName)
|
||||
// if the ldap group dn already exist
|
||||
ugs, err := usergroup.Mgr.List(ctx, q.New(q.KeyWords{"LdapGroupDN": req.MemberGroup.LdapGroupDN, "GroupType": req.MemberGroup.GroupType}))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
member.EntityID = groupID
|
||||
} else if len(req.MemberGroup.GroupName) > 0 && req.MemberGroup.GroupType == common.HTTPGroupType || req.MemberGroup.GroupType == common.OIDCGroupType {
|
||||
ugs, err := usergroup.Mgr.List(ctx, ugModel.UserGroup{GroupName: req.MemberGroup.GroupName, GroupType: req.MemberGroup.GroupType})
|
||||
if len(ugs) > 0 {
|
||||
member.EntityID = ugs[0].ID
|
||||
member.EntityType = common.GroupMember
|
||||
} else {
|
||||
// If groupname provided, use the provided groupname to name this group
|
||||
groupID, err := auth.SearchAndOnBoardGroup(ctx, req.MemberGroup.LdapGroupDN, req.MemberGroup.GroupName)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
member.EntityID = groupID
|
||||
}
|
||||
|
||||
} else if len(req.MemberGroup.GroupName) > 0 {
|
||||
// all group type can be added to project member by name
|
||||
ugs, err := usergroup.Mgr.List(ctx, q.New(q.KeyWords{"GroupName": req.MemberGroup.GroupName, "GroupType": req.MemberGroup.GroupType}))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(ugs) == 0 {
|
||||
groupID, err := auth.SearchAndOnBoardGroup(req.MemberGroup.GroupName, "")
|
||||
groupID, err := auth.SearchAndOnBoardGroup(ctx, req.MemberGroup.GroupName, "")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
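The member-controller change above follows a find-or-onboard flow: look the user group up by name and type first, and only search-and-onboard it when nothing exists yet, then use the resulting ID as the member entity ID. A simplified, self-contained sketch of that flow; the Group type and the two helper functions are stand-ins for Harbor's usergroup manager and auth.SearchAndOnBoardGroup, not the real APIs:

package main

import (
	"context"
	"fmt"
)

type Group struct {
	ID   int
	Name string
}

var existing = map[string]Group{"devs": {ID: 1, Name: "devs"}}

func listByName(_ context.Context, name string) []Group {
	if g, ok := existing[name]; ok {
		return []Group{g}
	}
	return nil
}

func searchAndOnboard(_ context.Context, name string) (int, error) {
	g := Group{ID: len(existing) + 1, Name: name}
	existing[name] = g
	return g.ID, nil
}

// resolveGroupID mirrors the flow above: reuse an already-onboarded group,
// otherwise onboard it and use the new ID as the member entity ID.
func resolveGroupID(ctx context.Context, name string) (int, error) {
	if gs := listByName(ctx, name); len(gs) > 0 {
		return gs[0].ID, nil
	}
	return searchAndOnboard(ctx, name)
}

func main() {
	fmt.Println(resolveGroupID(context.Background(), "devs")) // reuses ID 1
	fmt.Println(resolveGroupID(context.Background(), "ops"))  // onboards a new group
}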
@ -17,7 +17,6 @@ package preheat
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
proModels "github.com/goharbor/harbor/src/pkg/project/models"
|
||||
"strings"
|
||||
|
||||
tk "github.com/docker/distribution/registry/auth/token"
|
||||
@ -30,7 +29,6 @@ import (
|
||||
"github.com/goharbor/harbor/src/lib/config"
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
"github.com/goharbor/harbor/src/lib/log"
|
||||
"github.com/goharbor/harbor/src/lib/orm"
|
||||
"github.com/goharbor/harbor/src/lib/q"
|
||||
"github.com/goharbor/harbor/src/lib/selector"
|
||||
"github.com/goharbor/harbor/src/pkg/label/model"
|
||||
@ -40,6 +38,7 @@ import (
|
||||
"github.com/goharbor/harbor/src/pkg/p2p/preheat/models/provider"
|
||||
"github.com/goharbor/harbor/src/pkg/p2p/preheat/policy"
|
||||
pr "github.com/goharbor/harbor/src/pkg/p2p/preheat/provider"
|
||||
proModels "github.com/goharbor/harbor/src/pkg/project/models"
|
||||
"github.com/goharbor/harbor/src/pkg/scan/vuln"
|
||||
"github.com/goharbor/harbor/src/pkg/task"
|
||||
)
|
||||
@ -106,7 +105,7 @@ type extURLGetter func(c *selector.Candidate) (string, error)
|
||||
|
||||
// accessCredMaker is a func template to generate the required credential header value
|
||||
// The purpose of defining such a func template is decoupling code
|
||||
type accessCredMaker func(c *selector.Candidate) (string, error)
|
||||
type accessCredMaker func(ctx context.Context, c *selector.Candidate) (string, error)
|
||||
|
||||
// matchedPolicy is a temporary intermediary struct for passing parameters
|
||||
type matchedPolicy struct {
|
||||
@ -159,7 +158,7 @@ func NewEnforcer() Enforcer {
|
||||
r := fmt.Sprintf("%s/%s", c.Namespace, c.Repository)
|
||||
return fmt.Sprintf(manifestAPIPattern, edp, r, c.Tags[0]), nil
|
||||
},
|
||||
credMaker: func(c *selector.Candidate) (s string, e error) {
|
||||
credMaker: func(ctx context.Context, c *selector.Candidate) (s string, e error) {
|
||||
r := fmt.Sprintf("%s/%s", c.Namespace, c.Repository)
|
||||
|
||||
ac := []*tk.ResourceActions{
|
||||
@ -170,7 +169,7 @@ func NewEnforcer() Enforcer {
|
||||
Actions: []string{resourcePullAction},
|
||||
},
|
||||
}
|
||||
t, err := token.MakeToken(orm.Context(), "distributor", token.Registry, ac)
|
||||
t, err := token.MakeToken(ctx, "distributor", token.Registry, ac)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -434,7 +433,7 @@ func (de *defaultEnforcer) startTask(ctx context.Context, executionID int64, can
|
||||
return -1, err
|
||||
}
|
||||
|
||||
cred, err := de.credMaker(candidate)
|
||||
cred, err := de.credMaker(ctx, candidate)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
@ -17,7 +17,6 @@ package preheat
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
proModels "github.com/goharbor/harbor/src/pkg/project/models"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
@ -34,6 +33,7 @@ import (
|
||||
pr "github.com/goharbor/harbor/src/pkg/p2p/preheat/models/provider"
|
||||
"github.com/goharbor/harbor/src/pkg/p2p/preheat/provider"
|
||||
"github.com/goharbor/harbor/src/pkg/p2p/preheat/provider/auth"
|
||||
proModels "github.com/goharbor/harbor/src/pkg/project/models"
|
||||
"github.com/goharbor/harbor/src/pkg/scan/vuln"
|
||||
ta "github.com/goharbor/harbor/src/pkg/tag/model/tag"
|
||||
"github.com/goharbor/harbor/src/testing/controller/artifact"
|
||||
@ -155,7 +155,7 @@ func (suite *EnforcerTestSuite) SetupSuite() {
|
||||
r := fmt.Sprintf("%s/%s", c.Namespace, c.Repository)
|
||||
return fmt.Sprintf(manifestAPIPattern, "https://testing.harbor.com", r, c.Tags[0]), nil
|
||||
},
|
||||
credMaker: func(c *selector.Candidate) (s string, e error) {
|
||||
credMaker: func(ctx context.Context, c *selector.Candidate) (s string, e error) {
|
||||
return "fake-token", nil
|
||||
},
|
||||
}
|
||||
|
@ -104,7 +104,7 @@ func (c *controller) Create(ctx context.Context, project *models.Project) (int64
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := orm.WithTransaction(h)(ctx); err != nil {
|
||||
if err := orm.WithTransaction(h)(orm.SetTransactionOpNameToContext(ctx, "tx-create-project")); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
|
@ -17,6 +17,7 @@ package project
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
commonmodels "github.com/goharbor/harbor/src/common/models"
|
||||
"testing"
|
||||
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
@ -24,7 +25,6 @@ import (
|
||||
"github.com/goharbor/harbor/src/lib/q"
|
||||
models2 "github.com/goharbor/harbor/src/pkg/allowlist/models"
|
||||
"github.com/goharbor/harbor/src/pkg/project/models"
|
||||
usermodels "github.com/goharbor/harbor/src/pkg/user/models"
|
||||
ormtesting "github.com/goharbor/harbor/src/testing/lib/orm"
|
||||
"github.com/goharbor/harbor/src/testing/mock"
|
||||
allowlisttesting "github.com/goharbor/harbor/src/testing/pkg/allowlist"
|
||||
@ -50,16 +50,16 @@ func (suite *ControllerTestSuite) TestCreate() {
|
||||
c := controller{projectMgr: mgr, allowlistMgr: allowlistMgr, metaMgr: metadataMgr}
|
||||
|
||||
{
|
||||
metadataMgr.On("Add", ctx, mock.Anything, mock.Anything).Return(nil).Once()
|
||||
mgr.On("Create", ctx, mock.Anything).Return(int64(2), nil).Once()
|
||||
metadataMgr.On("Add", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
|
||||
mgr.On("Create", mock.Anything, mock.Anything).Return(int64(2), nil).Once()
|
||||
projectID, err := c.Create(ctx, &models.Project{OwnerID: 1, Metadata: map[string]string{"public": "true"}})
|
||||
suite.Nil(err)
|
||||
suite.Equal(int64(2), projectID)
|
||||
}
|
||||
|
||||
{
|
||||
metadataMgr.On("Add", ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("oops")).Once()
|
||||
mgr.On("Create", ctx, mock.Anything).Return(int64(2), nil).Once()
|
||||
metadataMgr.On("Add", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("oops")).Once()
|
||||
mgr.On("Create", mock.Anything, mock.Anything).Return(int64(2), nil).Once()
|
||||
projectID, err := c.Create(ctx, &models.Project{OwnerID: 1, Metadata: map[string]string{"public": "true"}})
|
||||
suite.Error(err)
|
||||
suite.Equal(int64(0), projectID)
|
||||
@ -122,8 +122,8 @@ func (suite *ControllerTestSuite) TestWithOwner() {
|
||||
}, nil)
|
||||
|
||||
userMgr := &user.Manager{}
|
||||
userMgr.On("List", ctx, mock.Anything).Return(usermodels.Users{
|
||||
&usermodels.User{UserID: 1, Username: "admin"},
|
||||
userMgr.On("List", ctx, mock.Anything).Return(commonmodels.Users{
|
||||
&commonmodels.User{UserID: 1, Username: "admin"},
|
||||
}, nil)
|
||||
|
||||
c := controller{projectMgr: mgr, userMgr: userMgr}
|
||||
|
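These project-controller tests switch their expectations from the captured ctx to mock.Anything, so the stubs still match once the controller starts passing a context derived for the named transaction. A small self-contained testify example of why the looser matcher is needed; the Manager mock and test names are made up:

package demo

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
)

// Manager is a hand-written testify mock for illustration.
type Manager struct{ mock.Mock }

type ctxKey struct{}

func (m *Manager) Create(ctx context.Context, name string) (int64, error) {
	args := m.Called(ctx, name)
	return args.Get(0).(int64), args.Error(1)
}

func TestCreateMatchesDerivedContext(t *testing.T) {
	m := &Manager{}
	// mock.Anything matches whatever context the code under test ends up
	// passing, including one derived from the original with extra values.
	m.On("Create", mock.Anything, mock.Anything).Return(int64(2), nil).Once()

	derived := context.WithValue(context.Background(), ctxKey{}, "tx-create-project")
	id, err := m.Create(derived, "library")
	if err != nil || id != 2 {
		t.Fatalf("unexpected result: %d, %v", id, err)
	}
	m.AssertExpectations(t)
}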
@ -17,23 +17,22 @@ package proxy
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/goharbor/harbor/src/controller/tag"
|
||||
proModels "github.com/goharbor/harbor/src/pkg/project/models"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/goharbor/harbor/src/controller/artifact"
|
||||
"github.com/goharbor/harbor/src/controller/blob"
|
||||
"github.com/goharbor/harbor/src/controller/event/operator"
|
||||
"github.com/goharbor/harbor/src/controller/tag"
|
||||
"github.com/goharbor/harbor/src/lib"
|
||||
"github.com/goharbor/harbor/src/lib/cache"
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
"github.com/goharbor/harbor/src/lib/log"
|
||||
"github.com/goharbor/harbor/src/lib/orm"
|
||||
proModels "github.com/goharbor/harbor/src/pkg/project/models"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
@ -69,6 +68,7 @@ type Controller interface {
|
||||
// EnsureTag ensure tag for digest
|
||||
EnsureTag(ctx context.Context, art lib.ArtifactInfo, tagName string) error
|
||||
}
|
||||
|
||||
type controller struct {
|
||||
blobCtl blob.Controller
|
||||
artifactCtl artifact.Controller
|
||||
@ -137,6 +137,10 @@ type ManifestList struct {
|
||||
ContentType string
|
||||
}
|
||||
|
||||
// UseLocalManifest check if these manifest could be found in local registry,
|
||||
// the return error should be nil when it is not found in local and need to delegate to remote registry
|
||||
// the return error should be NotFoundError when it is not found in remote registry
|
||||
// the error will be captured by framework and return 404 to client
|
||||
func (c *controller) UseLocalManifest(ctx context.Context, art lib.ArtifactInfo, remote RemoteInterface) (bool, *ManifestList, error) {
|
||||
a, err := c.local.GetManifest(ctx, art)
|
||||
if err != nil {
|
||||
@ -160,25 +164,39 @@ func (c *controller) UseLocalManifest(ctx context.Context, art lib.ArtifactInfo,
|
||||
}
|
||||
|
||||
var content []byte
|
||||
if c.cache != nil {
|
||||
err = c.cache.Fetch(getManifestListKey(art.Repository, string(desc.Digest)), &content)
|
||||
if err == nil {
|
||||
log.Debugf("Get the manifest list with key=cache:%v", getManifestListKey(art.Repository, string(desc.Digest)))
|
||||
return true, &ManifestList{content, string(desc.Digest), manifestlist.MediaTypeManifestList}, nil
|
||||
}
|
||||
var contentType string
|
||||
if c.cache == nil {
|
||||
return a != nil && string(desc.Digest) == a.Digest, nil, nil // digest matches
|
||||
}
|
||||
|
||||
err = c.cache.Fetch(manifestListKey(art.Repository, string(desc.Digest)), &content)
|
||||
if err != nil {
|
||||
if err == cache.ErrNotFound {
|
||||
log.Debugf("Digest is not found in manifest list cache, key=cache:%v", getManifestListKey(art.Repository, string(desc.Digest)))
|
||||
log.Debugf("Digest is not found in manifest list cache, key=cache:%v", manifestListKey(art.Repository, string(desc.Digest)))
|
||||
} else {
|
||||
log.Errorf("Failed to get manifest list from cache, error: %v", err)
|
||||
}
|
||||
return a != nil && string(desc.Digest) == a.Digest, nil, nil
|
||||
}
|
||||
return a != nil && string(desc.Digest) == a.Digest, nil, nil // digest matches
|
||||
err = c.cache.Fetch(manifestListContentTypeKey(art.Repository, string(desc.Digest)), &contentType)
|
||||
if err != nil {
|
||||
log.Debugf("failed to get the manifest list content type, not use local. error:%v", err)
|
||||
return false, nil, nil
|
||||
}
|
||||
log.Debugf("Get the manifest list with key=cache:%v", manifestListKey(art.Repository, string(desc.Digest)))
|
||||
return true, &ManifestList{content, string(desc.Digest), contentType}, nil
|
||||
|
||||
}
|
||||
|
||||
func getManifestListKey(repo, dig string) string {
|
||||
func manifestListKey(repo, dig string) string {
|
||||
// actual redis key format is cache:manifestlist:<repo name>:sha256:xxxx
|
||||
return "manifestlist:" + repo + ":" + dig
|
||||
}
|
||||
|
||||
func manifestListContentTypeKey(rep, dig string) string {
|
||||
return manifestListKey(rep, dig) + ":contenttype"
|
||||
}
|
||||
|
||||
func (c *controller) ProxyManifest(ctx context.Context, art lib.ArtifactInfo, remote RemoteInterface) (distribution.Manifest, error) {
|
||||
var man distribution.Manifest
|
||||
remoteRepo := getRemoteRepo(art)
|
||||
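UseLocalManifest now serves a cached manifest list only when both the payload and its content type can be fetched, stored under two related keys. A sketch of that key scheme and lookup, mirroring the Fetch calls above and the Save calls later in this change, but with a made-up in-memory cache rather than Harbor's lib/cache:

package main

import (
	"errors"
	"fmt"
)

// memCache is a toy stand-in; only the key scheme and the
// "payload plus content type" pairing are the point here.
type memCache map[string][]byte

var errNotFound = errors.New("not found")

func (c memCache) Save(key string, val []byte) { c[key] = val }
func (c memCache) Fetch(key string) ([]byte, error) {
	v, ok := c[key]
	if !ok {
		return nil, errNotFound
	}
	return v, nil
}

func manifestListKey(repo, digest string) string { return "manifestlist:" + repo + ":" + digest }
func manifestListContentTypeKey(repo, digest string) string {
	return manifestListKey(repo, digest) + ":contenttype"
}

func main() {
	c := memCache{}
	repo, dig := "library/hello-world", "sha256:abc"
	c.Save(manifestListKey(repo, dig), []byte(`{"manifests":[]}`))
	c.Save(manifestListContentTypeKey(repo, dig), []byte("application/vnd.docker.distribution.manifest.list.v2+json"))

	// Only serve from cache when both the payload and its content type are present.
	payload, err1 := c.Fetch(manifestListKey(repo, dig))
	ctype, err2 := c.Fetch(manifestListContentTypeKey(repo, dig))
	fmt.Println(err1 == nil && err2 == nil, len(payload), string(ctype))
}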
@ -227,15 +245,17 @@ func (c *controller) ProxyManifest(ctx context.Context, art lib.ArtifactInfo, re
|
||||
|
||||
return man, nil
|
||||
}
|
||||
|
||||
func (c *controller) HeadManifest(ctx context.Context, art lib.ArtifactInfo, remote RemoteInterface) (bool, *distribution.Descriptor, error) {
|
||||
remoteRepo := getRemoteRepo(art)
|
||||
ref := getReference(art)
|
||||
return remote.ManifestExist(remoteRepo, ref)
|
||||
}
|
||||
|
||||
func (c *controller) ProxyBlob(ctx context.Context, p *proModels.Project, art lib.ArtifactInfo) (int64, io.ReadCloser, error) {
|
||||
remoteRepo := getRemoteRepo(art)
|
||||
log.Debugf("The blob doesn't exist, proxy the request to the target server, url:%v", remoteRepo)
|
||||
rHelper, err := NewRemoteHelper(p.RegistryID)
|
||||
rHelper, err := NewRemoteHelper(ctx, p.RegistryID)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
@ -275,7 +295,7 @@ func (c *controller) waitAndPushManifest(ctx context.Context, remoteRepo string,
|
||||
return
|
||||
}
|
||||
}
|
||||
h.CacheContent(ctx, remoteRepo, man, art, r)
|
||||
h.CacheContent(ctx, remoteRepo, man, art, r, contType)
|
||||
}
|
||||
|
||||
// getRemoteRepo get the remote repository name, used in proxy cache
|
||||
|
@ -51,7 +51,7 @@ func NewCacheHandlerRegistry(local localInterface) map[string]ManifestCacheHandl
|
||||
// ManifestCacheHandler define how to cache manifest content
|
||||
type ManifestCacheHandler interface {
|
||||
// CacheContent - cache the content of the manifest
|
||||
CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface)
|
||||
CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface, contentType string)
|
||||
}
|
||||
|
||||
// ManifestListCache handle Manifest list type and index type
|
||||
@ -61,14 +61,18 @@ type ManifestListCache struct {
|
||||
}
|
||||
|
||||
// CacheContent ...
|
||||
func (m *ManifestListCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface) {
|
||||
func (m *ManifestListCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface, contentType string) {
|
||||
_, payload, err := man.Payload()
|
||||
if err != nil {
|
||||
log.Errorf("failed to get payload, error %v", err)
|
||||
return
|
||||
}
|
||||
key := getManifestListKey(art.Repository, art.Digest)
|
||||
key := manifestListKey(art.Repository, art.Digest)
|
||||
log.Debugf("cache manifest list with key=cache:%v", key)
|
||||
err = m.cache.Save(manifestListContentTypeKey(art.Repository, art.Digest), contentType, manifestListCacheInterval)
|
||||
if err != nil {
|
||||
log.Errorf("failed to cache content type, error %v", err)
|
||||
}
|
||||
err = m.cache.Save(key, payload, manifestListCacheInterval)
|
||||
if err != nil {
|
||||
log.Errorf("failed to cache payload, error %v", err)
|
||||
@ -164,7 +168,7 @@ type ManifestCache struct {
|
||||
}
|
||||
|
||||
// CacheContent ...
|
||||
func (m *ManifestCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface) {
|
||||
func (m *ManifestCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface, contentType string) {
|
||||
var waitBlobs []distribution.Descriptor
|
||||
for n := 0; n < maxManifestWait; n++ {
|
||||
time.Sleep(sleepIntervalSec * time.Second)
|
||||
|
@ -15,13 +15,14 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/goharbor/harbor/src/lib/orm"
|
||||
"github.com/goharbor/harbor/src/pkg/reg"
|
||||
"github.com/goharbor/harbor/src/pkg/reg/adapter"
|
||||
"github.com/goharbor/harbor/src/pkg/reg/model"
|
||||
"io"
|
||||
)
|
||||
|
||||
// RemoteInterface defines operations related to remote repository under proxy
|
||||
@ -42,22 +43,22 @@ type remoteHelper struct {
|
||||
}
|
||||
|
||||
// NewRemoteHelper create a remote interface
|
||||
func NewRemoteHelper(regID int64) (RemoteInterface, error) {
|
||||
func NewRemoteHelper(ctx context.Context, regID int64) (RemoteInterface, error) {
|
||||
r := &remoteHelper{
|
||||
regID: regID,
|
||||
registryMgr: reg.Mgr}
|
||||
if err := r.init(); err != nil {
|
||||
if err := r.init(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *remoteHelper) init() error {
|
||||
func (r *remoteHelper) init(ctx context.Context) error {
|
||||
|
||||
if r.registry != nil {
|
||||
return nil
|
||||
}
|
||||
reg, err := r.registryMgr.Get(orm.Context(), r.regID)
|
||||
reg, err := r.registryMgr.Get(ctx, r.regID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -23,19 +23,17 @@ import (
|
||||
"github.com/goharbor/harbor/src/lib/log"
|
||||
"github.com/goharbor/harbor/src/lib/orm"
|
||||
"github.com/goharbor/harbor/src/lib/q"
|
||||
redislib "github.com/goharbor/harbor/src/lib/redis"
|
||||
"github.com/goharbor/harbor/src/lib/retry"
|
||||
"github.com/goharbor/harbor/src/pkg/quota"
|
||||
"github.com/goharbor/harbor/src/pkg/quota/driver"
|
||||
"github.com/goharbor/harbor/src/pkg/quota/types"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
|
||||
// quota driver
|
||||
_ "github.com/goharbor/harbor/src/controller/quota/driver"
|
||||
)
|
||||
|
||||
var (
|
||||
// expire reserved resources when no actions on the key of the reserved resources in redis during 1 hour
|
||||
defaultReservedExpiration = time.Hour
|
||||
defaultRetryTimeout = time.Minute * 5
|
||||
)
|
||||
|
||||
var (
|
||||
@ -82,8 +80,7 @@ type Controller interface {
|
||||
// NewController creates an instance of the default quota controller
|
||||
func NewController() Controller {
|
||||
return &controller{
|
||||
reservedExpiration: defaultReservedExpiration,
|
||||
quotaMgr: quota.Mgr,
|
||||
quotaMgr: quota.Mgr,
|
||||
}
|
||||
}
|
||||
|
||||
@ -167,107 +164,46 @@ func (c *controller) List(ctx context.Context, query *q.Query, options ...Option
|
||||
return quotas, nil
|
||||
}
|
||||
|
||||
func (c *controller) getReservedResources(ctx context.Context, reference, referenceID string) (types.ResourceList, error) {
|
||||
conn := redislib.DefaultPool().Get()
|
||||
defer conn.Close()
|
||||
|
||||
key := reservedResourcesKey(reference, referenceID)
|
||||
|
||||
str, err := redis.String(conn.Do("GET", key))
|
||||
if err == redis.ErrNil {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return types.NewResourceList(str)
|
||||
}
|
||||
|
||||
func (c *controller) setReservedResources(ctx context.Context, reference, referenceID string, resources types.ResourceList) error {
|
||||
conn := redislib.DefaultPool().Get()
|
||||
defer conn.Close()
|
||||
|
||||
key := reservedResourcesKey(reference, referenceID)
|
||||
|
||||
reply, err := redis.String(conn.Do("SET", key, resources.String(), "EX", int64(c.reservedExpiration/time.Second)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if reply != "OK" {
|
||||
return fmt.Errorf("bad reply value")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *controller) reserveResources(ctx context.Context, reference, referenceID string, resources types.ResourceList) error {
|
||||
reserve := func(ctx context.Context) error {
|
||||
q, err := c.quotaMgr.GetByRefForUpdate(ctx, reference, referenceID)
|
||||
func (c *controller) updateUsageWithRetry(ctx context.Context, reference, referenceID string, op func(hardLimits, used types.ResourceList) (types.ResourceList, error)) error {
|
||||
f := func() error {
|
||||
q, err := c.quotaMgr.GetByRef(ctx, reference, referenceID)
|
||||
if err != nil {
|
||||
return err
|
||||
return retry.Abort(err)
|
||||
}
|
||||
|
||||
hardLimits, err := q.GetHard()
|
||||
if err != nil {
|
||||
return err
|
||||
return retry.Abort(err)
|
||||
}
|
||||
|
||||
used, err := q.GetUsed()
|
||||
if err != nil {
|
||||
return err
|
||||
return retry.Abort(err)
|
||||
}
|
||||
|
||||
reserved, err := c.getReservedResources(ctx, reference, referenceID)
|
||||
newUsed, err := op(hardLimits, used)
|
||||
if err != nil {
|
||||
log.G(ctx).Errorf("failed to get reserved resources for %s %s, error: %v", reference, referenceID, err)
|
||||
return err
|
||||
return retry.Abort(err)
|
||||
}
|
||||
|
||||
newReserved := types.Add(reserved, resources)
|
||||
q.SetUsed(newUsed)
|
||||
|
||||
if err := quota.IsSafe(hardLimits, types.Add(used, reserved), types.Add(used, newReserved), false); err != nil {
|
||||
return errors.DeniedError(err).WithMessage("Quota exceeded when processing the request of %v", err)
|
||||
err = c.quotaMgr.Update(ctx, q)
|
||||
if err != nil && !errors.Is(err, orm.ErrOptimisticLock) {
|
||||
return retry.Abort(err)
|
||||
}
|
||||
|
||||
if err := c.setReservedResources(ctx, reference, referenceID, newReserved); err != nil {
|
||||
log.G(ctx).Errorf("failed to set reserved resources for %s %s, error: %v", reference, referenceID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
return orm.WithTransaction(reserve)(ctx)
|
||||
}
|
||||
|
||||
func (c *controller) unreserveResources(ctx context.Context, reference, referenceID string, resources types.ResourceList) error {
|
||||
unreserve := func(ctx context.Context) error {
|
||||
if _, err := c.quotaMgr.GetByRefForUpdate(ctx, reference, referenceID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reserved, err := c.getReservedResources(ctx, reference, referenceID)
|
||||
if err != nil {
|
||||
log.G(ctx).Errorf("failed to get reserved resources for %s %s, error: %v", reference, referenceID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
newReserved := types.Subtract(reserved, resources)
|
||||
// ensure that new used is never negative
|
||||
if negativeUsed := types.IsNegative(newReserved); len(negativeUsed) > 0 {
|
||||
return fmt.Errorf("reserved resources is negative for resource(s): %s", quota.PrettyPrintResourceNames(negativeUsed))
|
||||
}
|
||||
|
||||
if err := c.setReservedResources(ctx, reference, referenceID, newReserved); err != nil {
|
||||
log.G(ctx).Errorf("failed to set reserved resources for %s %s, error: %v", reference, referenceID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
options := []retry.Option{
|
||||
retry.Timeout(defaultRetryTimeout),
|
||||
retry.Backoff(false),
|
||||
retry.Callback(func(err error, sleep time.Duration) {
|
||||
log.G(ctx).Debugf("failed to update the quota usage for %s %s, error: %v", reference, referenceID, err)
|
||||
}),
|
||||
}
|
||||
|
||||
return orm.WithTransaction(unreserve)(ctx)
|
||||
return retry.Retry(f, options...)
|
||||
}
|
||||
|
||||
func (c *controller) Refresh(ctx context.Context, reference, referenceID string, options ...Option) error {
|
||||
@ -278,44 +214,17 @@ func (c *controller) Refresh(ctx context.Context, reference, referenceID string,
|
||||
|
||||
opts := newOptions(options...)
|
||||
|
||||
refresh := func(ctx context.Context) error {
|
||||
q, err := c.quotaMgr.GetByRefForUpdate(ctx, reference, referenceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hardLimits, err := q.GetHard()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
used, err := q.GetUsed()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
calculateUsage := func() (types.ResourceList, error) {
|
||||
newUsed, err := driver.CalculateUsage(ctx, referenceID)
|
||||
if err != nil {
|
||||
log.G(ctx).Errorf("failed to calculate quota usage for %s %s, error: %v", reference, referenceID, err)
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// ensure that new used is never negative
|
||||
if negativeUsed := types.IsNegative(newUsed); len(negativeUsed) > 0 {
|
||||
return fmt.Errorf("quota usage is negative for resource(s): %s", quota.PrettyPrintResourceNames(negativeUsed))
|
||||
}
|
||||
|
||||
if err := quota.IsSafe(hardLimits, used, newUsed, opts.IgnoreLimitation); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
q.SetUsed(newUsed)
|
||||
q.UpdateTime = time.Now()
|
||||
|
||||
return c.quotaMgr.Update(ctx, q)
|
||||
return newUsed, err
|
||||
}
|
||||
|
||||
return orm.WithTransaction(refresh)(ctx)
|
||||
return c.updateUsageWithRetry(ctx, reference, referenceID, refreshResources(calculateUsage, opts.IgnoreLimitation))
|
||||
}
|
||||
|
||||
func (c *controller) Request(ctx context.Context, reference, referenceID string, resources types.ResourceList, f func() error) error {
|
||||
@ -323,28 +232,26 @@ func (c *controller) Request(ctx context.Context, reference, referenceID string,
|
||||
return f()
|
||||
}
|
||||
|
||||
if err := c.reserveResources(ctx, reference, referenceID, resources); err != nil {
|
||||
if err := c.updateUsageWithRetry(ctx, reference, referenceID, reserveResources(resources)); err != nil {
|
||||
log.G(ctx).Errorf("reserve resources %s for %s %s failed, error: %v", resources.String(), reference, referenceID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := c.unreserveResources(ctx, reference, referenceID, resources); err != nil {
|
||||
// ignore this error because reserved resources will be expired
|
||||
// when no actions on the key of the reserved resources in redis during sometimes
|
||||
log.G(ctx).Warningf("unreserve resources %s for %s %s failed, error: %v", resources.String(), reference, referenceID, err)
|
||||
err := f()
|
||||
|
||||
if err != nil {
|
||||
if er := c.updateUsageWithRetry(ctx, reference, referenceID, rollbackResources(resources)); er != nil {
|
||||
// ignore this error, the quota usage will be correct when users do operations which will call refresh quota
|
||||
log.G(ctx).Warningf("rollback resources %s for %s %s failed, error: %v", resources.String(), reference, referenceID, er)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := f(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.Refresh(ctx, reference, referenceID)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *controller) Update(ctx context.Context, u *quota.Quota) error {
|
||||
update := func(ctx context.Context) error {
|
||||
q, err := c.quotaMgr.GetByRefForUpdate(ctx, u.Reference, u.ReferenceID)
|
||||
f := func() error {
|
||||
q, err := c.quotaMgr.GetByRef(ctx, u.Reference, u.ReferenceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -361,11 +268,15 @@ func (c *controller) Update(ctx context.Context, u *quota.Quota) error {
|
||||
}
|
||||
}
|
||||
|
||||
q.UpdateTime = time.Now()
|
||||
return c.quotaMgr.Update(ctx, q)
|
||||
}
|
||||
|
||||
return orm.WithTransaction(update)(ctx)
|
||||
options := []retry.Option{
|
||||
retry.Timeout(defaultRetryTimeout),
|
||||
retry.Backoff(false),
|
||||
}
|
||||
|
||||
return retry.Retry(f, options...)
|
||||
}
|
||||
|
||||
// Driver returns quota driver for the reference
|
||||
@ -388,6 +299,46 @@ func Validate(ctx context.Context, reference string, hardLimits types.ResourceLi
|
||||
return d.Validate(hardLimits)
|
||||
}
|
||||
|
||||
func reservedResourcesKey(reference, referenceID string) string {
|
||||
return fmt.Sprintf("quota:%s:%s:reserved", reference, referenceID)
|
||||
func reserveResources(resources types.ResourceList) func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
|
||||
return func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
|
||||
newUsed := types.Add(used, resources)
|
||||
|
||||
if err := quota.IsSafe(hardLimits, used, newUsed, false); err != nil {
|
||||
return nil, errors.DeniedError(err).WithMessage("Quota exceeded when processing the request of %v", err)
|
||||
}
|
||||
|
||||
return newUsed, nil
|
||||
}
|
||||
}
|
||||
|
||||
func rollbackResources(resources types.ResourceList) func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
|
||||
return func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
|
||||
newUsed := types.Subtract(used, resources)
|
||||
// ensure that new used is never negative
|
||||
if negativeUsed := types.IsNegative(newUsed); len(negativeUsed) > 0 {
|
||||
return nil, fmt.Errorf("resources is negative for resource(s): %s", quota.PrettyPrintResourceNames(negativeUsed))
|
||||
}
|
||||
|
||||
return newUsed, nil
|
||||
}
|
||||
}
|
||||
|
||||
func refreshResources(calculateUsage func() (types.ResourceList, error), ignoreLimitation bool) func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
|
||||
return func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
|
||||
newUsed, err := calculateUsage()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// ensure that new used is never negative
|
||||
if negativeUsed := types.IsNegative(newUsed); len(negativeUsed) > 0 {
|
||||
return nil, fmt.Errorf("quota usage is negative for resource(s): %s", quota.PrettyPrintResourceNames(negativeUsed))
|
||||
}
|
||||
|
||||
if err := quota.IsSafe(hardLimits, used, newUsed, ignoreLimitation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newUsed, nil
|
||||
}
|
||||
}
|
||||
|
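The refactored quota controller drops the row locks and Redis reservations in favor of optimistic updates that are retried when the write hits orm.ErrOptimisticLock (the retry.Retry, retry.Abort, retry.Timeout, retry.Backoff, and retry.Callback helpers used above all come from this diff). A simplified, self-contained sketch of the same retry-on-conflict shape without Harbor's retry package; the function names and timings are illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errOptimisticLock = errors.New("the object has been modified concurrently")

// updateWithRetry re-reads, re-applies, and re-writes until the optimistic-lock
// conflict goes away or the deadline passes; non-retryable errors abort at once.
func updateWithRetry(apply func() error, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		err := apply()
		if err == nil || !errors.Is(err, errOptimisticLock) {
			return err // success, or a non-retryable error: abort
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("retry timed out: %w", err)
		}
	}
}

func main() {
	attempts := 0
	err := updateWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return errOptimisticLock // simulated concurrent writer
		}
		return nil
	}, time.Second)
	fmt.Println(attempts, err) // 3 <nil>
}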
@ -17,9 +17,7 @@ package quota
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/lib/orm"
|
||||
"github.com/goharbor/harbor/src/pkg/quota"
|
||||
@@ -51,81 +49,20 @@ func (suite *ControllerTestSuite) SetupTest() {
 	driver.Register(suite.reference, suite.driver)

 	suite.quotaMgr = &quotatesting.Manager{}
-	suite.ctl = &controller{quotaMgr: suite.quotaMgr, reservedExpiration: defaultReservedExpiration}
+	suite.ctl = &controller{quotaMgr: suite.quotaMgr}

 	hardLimits := types.ResourceList{types.ResourceStorage: 100}
 	suite.quota = &quota.Quota{Hard: hardLimits.String(), Used: types.Zero(hardLimits).String()}
 }
func (suite *ControllerTestSuite) PrepareForUpdate(q *quota.Quota, newUsage interface{}) {
|
||||
mock.OnAnything(suite.quotaMgr, "GetByRefForUpdate").Return(q, nil)
|
||||
mock.OnAnything(suite.quotaMgr, "GetByRef").Return(q, nil)
|
||||
|
||||
mock.OnAnything(suite.driver, "CalculateUsage").Return(newUsage, nil)
|
||||
|
||||
mock.OnAnything(suite.quotaMgr, "Update").Return(nil)
|
||||
}
|
||||
|
||||
func (suite *ControllerTestSuite) TestGetReservedResources() {
|
||||
reservedExpiration := time.Second * 3
|
||||
ctl := &controller{reservedExpiration: reservedExpiration}
|
||||
|
||||
reference, referenceID := "reference", uuid.New().String()
|
||||
|
||||
{
|
||||
resources, err := ctl.getReservedResources(context.TODO(), reference, referenceID)
|
||||
suite.Nil(err)
|
||||
suite.Len(resources, 0)
|
||||
}
|
||||
|
||||
suite.Nil(ctl.setReservedResources(context.TODO(), reference, referenceID, types.ResourceList{types.ResourceStorage: 100}))
|
||||
|
||||
{
|
||||
resources, err := ctl.getReservedResources(context.TODO(), reference, referenceID)
|
||||
suite.Nil(err)
|
||||
suite.Len(resources, 1)
|
||||
}
|
||||
|
||||
time.Sleep(reservedExpiration * 2)
|
||||
|
||||
{
|
||||
resources, err := ctl.getReservedResources(context.TODO(), reference, referenceID)
|
||||
suite.Nil(err)
|
||||
suite.Len(resources, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *ControllerTestSuite) TestReserveResources() {
|
||||
mock.OnAnything(suite.quotaMgr, "GetByRefForUpdate").Return(suite.quota, nil)
|
||||
|
||||
ctx := orm.NewContext(context.TODO(), &ormtesting.FakeOrmer{})
|
||||
referenceID := uuid.New().String()
|
||||
resources := types.ResourceList{types.ResourceStorage: 100}
|
||||
|
||||
ctl := suite.ctl.(*controller)
|
||||
|
||||
suite.Nil(ctl.reserveResources(ctx, suite.reference, referenceID, resources))
|
||||
|
||||
suite.Error(ctl.reserveResources(ctx, suite.reference, referenceID, resources))
|
||||
}
|
||||
|
||||
func (suite *ControllerTestSuite) TestUnreserveResources() {
|
||||
mock.OnAnything(suite.quotaMgr, "GetByRefForUpdate").Return(suite.quota, nil)
|
||||
|
||||
ctx := orm.NewContext(context.TODO(), &ormtesting.FakeOrmer{})
|
||||
referenceID := uuid.New().String()
|
||||
resources := types.ResourceList{types.ResourceStorage: 100}
|
||||
|
||||
ctl := suite.ctl.(*controller)
|
||||
|
||||
suite.Nil(ctl.reserveResources(ctx, suite.reference, referenceID, resources))
|
||||
|
||||
suite.Error(ctl.reserveResources(ctx, suite.reference, referenceID, resources))
|
||||
|
||||
suite.Nil(ctl.unreserveResources(ctx, suite.reference, referenceID, resources))
|
||||
|
||||
suite.Nil(ctl.reserveResources(ctx, suite.reference, referenceID, resources))
|
||||
}
|
||||
|
||||
func (suite *ControllerTestSuite) TestRefresh() {
|
||||
suite.PrepareForUpdate(suite.quota, types.ResourceList{types.ResourceStorage: 0})
|
||||
|
||||
@ -174,6 +111,7 @@ func (suite *ControllerTestSuite) TestNoResourcesRequest() {
|
||||
|
||||
suite.Nil(suite.ctl.Request(ctx, suite.reference, referenceID, nil, func() error { return nil }))
|
||||
}
|
||||
|
||||
func (suite *ControllerTestSuite) TestRequest() {
|
||||
suite.PrepareForUpdate(suite.quota, nil)
|
||||
|
||||
@ -207,25 +145,3 @@ func (suite *ControllerTestSuite) TestRequestFunctionFailed() {
|
||||
func TestControllerTestSuite(t *testing.T) {
|
||||
suite.Run(t, &ControllerTestSuite{})
|
||||
}
|
||||
|
||||
func BenchmarkGetReservedResources(b *testing.B) {
|
||||
ctl := &controller{reservedExpiration: defaultReservedExpiration}
|
||||
|
||||
ctx := context.TODO()
|
||||
reference, referenceID := "reference", uuid.New().String()
|
||||
ctl.setReservedResources(ctx, reference, referenceID, types.ResourceList{types.ResourceStorage: 100})
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
ctl.getReservedResources(ctx, reference, referenceID)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSetReservedResources(b *testing.B) {
|
||||
ctl := &controller{reservedExpiration: defaultReservedExpiration}
|
||||
|
||||
ctx := context.TODO()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s := strconv.Itoa(i)
|
||||
ctl.setReservedResources(ctx, "reference"+s, s, types.ResourceList{types.ResourceStorage: 100})
|
||||
}
|
||||
}
|
||||
|
@ -16,7 +16,6 @@ package quota
|
||||
|
||||
import (
|
||||
"context"
|
||||
proModels "github.com/goharbor/harbor/src/pkg/project/models"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
@ -24,6 +23,7 @@ import (
|
||||
"github.com/goharbor/harbor/src/controller/project"
|
||||
"github.com/goharbor/harbor/src/lib/orm"
|
||||
"github.com/goharbor/harbor/src/lib/q"
|
||||
"github.com/goharbor/harbor/src/pkg/project/models"
|
||||
"github.com/goharbor/harbor/src/pkg/quota"
|
||||
"github.com/goharbor/harbor/src/pkg/quota/driver"
|
||||
"github.com/goharbor/harbor/src/pkg/quota/types"
|
||||
@ -76,21 +76,21 @@ func (suite *RefreshForProjectsTestSuite) TestRefreshForProjects() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
startProjectID := rand.Int63()
|
||||
var firstPageProjects, secondPageProjects []*proModels.Project
|
||||
var firstPageProjects, secondPageProjects []*models.Project
|
||||
for i := 0; i < 50; i++ {
|
||||
firstPageProjects = append(firstPageProjects, &proModels.Project{
|
||||
firstPageProjects = append(firstPageProjects, &models.Project{
|
||||
ProjectID: startProjectID + int64(i),
|
||||
})
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
secondPageProjects = append(secondPageProjects, &proModels.Project{
|
||||
secondPageProjects = append(secondPageProjects, &models.Project{
|
||||
ProjectID: startProjectID + 50 + int64(i),
|
||||
})
|
||||
}
|
||||
|
||||
page := 1
|
||||
mock.OnAnything(suite.projectCtl, "List").Return(func(context.Context, *q.Query, ...project.Option) []*proModels.Project {
|
||||
mock.OnAnything(suite.projectCtl, "List").Return(func(context.Context, *q.Query, ...project.Option) []*models.Project {
|
||||
defer func() {
|
||||
page++
|
||||
}()
|
||||
@ -109,7 +109,6 @@ func (suite *RefreshForProjectsTestSuite) TestRefreshForProjects() {
|
||||
q.SetUsed(types.ResourceList{types.ResourceStorage: 0})
|
||||
|
||||
mock.OnAnything(suite.quotaMgr, "GetByRef").Return(q, nil)
|
||||
mock.OnAnything(suite.quotaMgr, "GetByRefForUpdate").Return(q, nil)
|
||||
mock.OnAnything(suite.quotaMgr, "Update").Return(nil)
|
||||
mock.OnAnything(suite.driver, "CalculateUsage").Return(types.ResourceList{types.ResourceStorage: 1}, nil)
|
||||
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"github.com/goharbor/harbor/src/lib/log"
|
||||
"github.com/goharbor/harbor/src/lib/orm"
|
||||
"github.com/goharbor/harbor/src/lib/q"
|
||||
"github.com/goharbor/harbor/src/lib/retry"
|
||||
"github.com/goharbor/harbor/src/pkg/reg"
|
||||
"github.com/goharbor/harbor/src/pkg/reg/model"
|
||||
"github.com/goharbor/harbor/src/pkg/replication"
|
||||
@ -86,7 +87,7 @@ func NewController() Controller {
|
||||
scheduler: scheduler.Sched,
|
||||
flowCtl: flow.NewController(),
|
||||
ormCreator: orm.Crt,
|
||||
wp: lib.NewWorkerPool(1024),
|
||||
wp: lib.NewWorkerPool(10),
|
||||
}
|
||||
}
|
||||
|
||||
@ -112,13 +113,15 @@ func (c *controller) Start(ctx context.Context, policy *replicationmodel.Policy,
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
c.wp.GetWorker()
|
||||
// start the replication flow in background
|
||||
// as the process runs inside a goroutine, the transaction in the outer ctx
|
||||
// may be submitted already when the process starts, so pass a new context
|
||||
// may be submitted already when the process starts, so create a new context
|
||||
// with orm populated to the goroutine
|
||||
go func(ctx context.Context) {
|
||||
go func() {
|
||||
c.wp.GetWorker()
|
||||
defer c.wp.ReleaseWorker()
|
||||
|
||||
ctx := orm.NewContext(context.Background(), c.ormCreator.Create())
|
||||
// recover in case panic during the adapter process
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
@ -129,7 +132,7 @@ func (c *controller) Start(ctx context.Context, policy *replicationmodel.Policy,
|
||||
|
||||
// as we start a new transaction in the goroutine, the execution record may not
|
||||
// be inserted yet, wait until it is ready before continue
|
||||
if err := lib.RetryUntil(func() error {
|
||||
if err := retry.Retry(func() error {
|
||||
_, err := c.execMgr.Get(ctx, id)
|
||||
return err
|
||||
}); err != nil {
|
||||
@ -144,7 +147,7 @@ func (c *controller) Start(ctx context.Context, policy *replicationmodel.Policy,
|
||||
return
|
||||
}
|
||||
c.markError(ctx, id, err)
|
||||
}(orm.NewContext(context.Background(), c.ormCreator.Create()))
|
||||
}()
|
||||
return id, nil
|
||||
}
|
||||
|
||||
|
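The replication controller caps concurrent flows with a worker pool (sized down from 1024 to 10 here) and now acquires the worker inside the background goroutine. A minimal sketch of a semaphore-style pool showing the idea; this is not how lib.NewWorkerPool is actually implemented, and the names below are illustrative:

package main

import (
	"fmt"
	"sync"
)

// pool is a counting semaphore: GetWorker blocks once `size` goroutines hold a
// slot, and ReleaseWorker frees a slot for the next waiter.
type pool struct{ slots chan struct{} }

func newPool(size int) *pool   { return &pool{slots: make(chan struct{}, size)} }
func (p *pool) GetWorker()     { p.slots <- struct{}{} }
func (p *pool) ReleaseWorker() { <-p.slots }

func main() {
	wp := newPool(10)
	var wg sync.WaitGroup
	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			wp.GetWorker() // at most 10 replication flows run at once
			defer wp.ReleaseWorker()
			fmt.Println("running flow", id)
		}(i)
	}
	wg.Wait()
}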
@ -91,7 +91,7 @@ func (c *copyFlow) Run(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.createTasks(ctx, srcResources, dstResources)
|
||||
return c.createTasks(ctx, srcResources, dstResources, c.policy.Speed)
|
||||
}
|
||||
|
||||
func (c *copyFlow) isExecutionStopped(ctx context.Context) (bool, error) {
|
||||
@ -102,7 +102,7 @@ func (c *copyFlow) isExecutionStopped(ctx context.Context) (bool, error) {
|
||||
return execution.Status == job.StoppedStatus.String(), nil
|
||||
}
|
||||
|
||||
func (c *copyFlow) createTasks(ctx context.Context, srcResources, dstResources []*model.Resource) error {
|
||||
func (c *copyFlow) createTasks(ctx context.Context, srcResources, dstResources []*model.Resource, speed int32) error {
|
||||
for i, resource := range srcResources {
|
||||
src, err := json.Marshal(resource)
|
||||
if err != nil {
|
||||
@ -121,6 +121,7 @@ func (c *copyFlow) createTasks(ctx context.Context, srcResources, dstResources [
|
||||
Parameters: map[string]interface{}{
|
||||
"src_resource": string(src),
|
||||
"dst_resource": string(dest),
|
||||
"speed": speed,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -45,6 +45,7 @@ type Policy struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
CreationTime time.Time `json:"creation_time"`
|
||||
UpdateTime time.Time `json:"update_time"`
|
||||
Speed int32 `json:"speed"`
|
||||
}
|
||||
|
||||
// IsScheduledTrigger returns true when the policy is scheduled trigger and enabled
|
||||
@ -130,6 +131,7 @@ func (p *Policy) From(policy *replicationmodel.Policy) error {
|
||||
p.Enabled = policy.Enabled
|
||||
p.CreationTime = policy.CreationTime
|
||||
p.UpdateTime = policy.UpdateTime
|
||||
p.Speed = policy.Speed
|
||||
|
||||
if policy.SrcRegistryID > 0 {
|
||||
p.SrcRegistry = &model.Registry{
|
||||
@ -173,6 +175,7 @@ func (p *Policy) To() (*replicationmodel.Policy, error) {
|
||||
ReplicateDeletion: p.ReplicateDeletion,
|
||||
CreationTime: p.CreationTime,
|
||||
UpdateTime: p.UpdateTime,
|
||||
Speed: p.Speed,
|
||||
}
|
||||
if p.SrcRegistry != nil {
|
||||
policy.SrcRegistryID = p.SrcRegistry.ID
|
||||
|
@ -49,7 +49,7 @@ type transfer struct {
|
||||
dst adapter.ChartRegistry
|
||||
}
|
||||
|
||||
func (t *transfer) Transfer(src *model.Resource, dst *model.Resource) error {
|
||||
func (t *transfer) Transfer(src *model.Resource, dst *model.Resource, speed int32) error {
|
||||
// initialize
|
||||
if err := t.initialize(src, dst); err != nil {
|
||||
return err
|
||||
@ -78,7 +78,7 @@ func (t *transfer) Transfer(src *model.Resource, dst *model.Resource) error {
|
||||
version: dst.Metadata.Artifacts[0].Tags[0],
|
||||
}
|
||||
// copy the chart from source registry to the destination
|
||||
return t.copy(srcChart, dstChart, dst.Override)
|
||||
return t.copy(srcChart, dstChart, dst.Override, speed)
|
||||
}
|
||||
|
||||
func (t *transfer) initialize(src, dst *model.Resource) error {
|
||||
@ -129,7 +129,7 @@ func (t *transfer) shouldStop() bool {
|
||||
return isStopped
|
||||
}
|
||||
|
||||
func (t *transfer) copy(src, dst *chart, override bool) error {
|
||||
func (t *transfer) copy(src, dst *chart, override bool, speed int32) error {
|
||||
if t.shouldStop() {
|
||||
return nil
|
||||
}
|
||||
@ -160,6 +160,10 @@ func (t *transfer) copy(src, dst *chart, override bool) error {
|
||||
t.logger.Errorf("failed to download the chart %s:%s: %v", src.name, src.version, err)
|
||||
return err
|
||||
}
|
||||
if speed > 0 {
|
||||
t.logger.Infof("limit network speed at %d kb/s", speed)
|
||||
chart = trans.NewReader(chart, speed)
|
||||
}
|
||||
defer chart.Close()
|
||||
|
||||
if err = t.dst.UploadChart(dst.name, dst.version, chart); err != nil {
|
||||
|
@ -96,7 +96,7 @@ func TestCopy(t *testing.T) {
|
||||
name: "dest/harbor",
|
||||
version: "0.2.0",
|
||||
}
|
||||
err := transfer.copy(src, dst, true)
|
||||
err := transfer.copy(src, dst, true, 0)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
|
@ -69,9 +69,10 @@ type transfer struct {
|
||||
isStopped trans.StopFunc
|
||||
src adapter.ArtifactRegistry
|
||||
dst adapter.ArtifactRegistry
|
||||
speed int32
|
||||
}
|
||||
|
||||
func (t *transfer) Transfer(src *model.Resource, dst *model.Resource) error {
|
||||
func (t *transfer) Transfer(src *model.Resource, dst *model.Resource, speed int32) error {
    // initialize
    if err := t.initialize(src, dst); err != nil {
        return err
@@ -88,7 +89,7 @@ func (t *transfer) Transfer(src *model.Resource, dst *model.Resource) error {
    }

    // copy the repository from source registry to the destination
    return t.copy(t.convert(src), t.convert(dst), dst.Override)
    return t.copy(t.convert(src), t.convert(dst), dst.Override, speed)
}

func (t *transfer) convert(resource *model.Resource) *repository {
@@ -161,14 +162,18 @@ func (t *transfer) shouldStop() bool {
    return isStopped
}

func (t *transfer) copy(src *repository, dst *repository, override bool) error {
func (t *transfer) copy(src *repository, dst *repository, override bool, speed int32) error {
    srcRepo := src.repository
    dstRepo := dst.repository
    t.logger.Infof("copying %s:[%s](source registry) to %s:[%s](destination registry)...",
        srcRepo, strings.Join(src.tags, ","), dstRepo, strings.Join(dst.tags, ","))
    if speed > 0 {
        t.logger.Infof("limit network speed at %d kb/s", speed)
    }

    var err error
    for i := range src.tags {
        if e := t.copyArtifact(srcRepo, src.tags[i], dstRepo, dst.tags[i], override); e != nil {
        if e := t.copyArtifact(srcRepo, src.tags[i], dstRepo, dst.tags[i], override, speed); e != nil {
            if e == errStopped {
                return nil
            }
@@ -187,7 +192,7 @@ func (t *transfer) copy(src *repository, dst *repository, override bool) error {
    return nil
}

func (t *transfer) copyArtifact(srcRepo, srcRef, dstRepo, dstRef string, override bool) error {
func (t *transfer) copyArtifact(srcRepo, srcRef, dstRepo, dstRef string, override bool, speed int32) error {
    t.logger.Infof("copying %s:%s(source registry) to %s:%s(destination registry)...",
        srcRepo, srcRef, dstRepo, dstRef)
    // pull the manifest from the source registry
@@ -221,7 +226,7 @@ func (t *transfer) copyArtifact(srcRepo, srcRef, dstRepo, dstRef string, overrid

    // copy contents between the source and destination registries
    for _, content := range manifest.References() {
        if err = t.copyContent(content, srcRepo, dstRepo); err != nil {
        if err = t.copyContent(content, srcRepo, dstRepo, speed); err != nil {
            return err
        }
    }
@@ -237,7 +242,7 @@ func (t *transfer) copyArtifact(srcRepo, srcRef, dstRepo, dstRef string, overrid
}

// copy the content from source registry to destination according to its media type
func (t *transfer) copyContent(content distribution.Descriptor, srcRepo, dstRepo string) error {
func (t *transfer) copyContent(content distribution.Descriptor, srcRepo, dstRepo string, speed int32) error {
    digest := content.Digest.String()
    switch content.MediaType {
    // when the media type of pulled manifest is index,
@@ -246,7 +251,7 @@ func (t *transfer) copyContent(content distribution.Descriptor, srcRepo, dstRepo
        v1.MediaTypeImageManifest, schema2.MediaTypeManifest,
        schema1.MediaTypeSignedManifest, schema1.MediaTypeManifest:
        // as using digest as the reference, so set the override to true directly
        return t.copyArtifact(srcRepo, digest, dstRepo, digest, true)
        return t.copyArtifact(srcRepo, digest, dstRepo, digest, true, speed)
    // handle foreign layer
    case schema2.MediaTypeForeignLayer:
        t.logger.Infof("the layer %s is a foreign layer, skip", digest)
@@ -255,15 +260,15 @@ func (t *transfer) copyContent(content distribution.Descriptor, srcRepo, dstRepo
    // the media type of the layer or config can be "application/octet-stream",
    // schema1.MediaTypeManifestLayer, schema2.MediaTypeLayer, schema2.MediaTypeImageConfig
    default:
        return t.copyBlobWithRetry(srcRepo, dstRepo, digest, content.Size)
        return t.copyBlobWithRetry(srcRepo, dstRepo, digest, content.Size, speed)
    }
}

func (t *transfer) copyBlobWithRetry(srcRepo, dstRepo, digest string, sizeFromDescriptor int64) error {
func (t *transfer) copyBlobWithRetry(srcRepo, dstRepo, digest string, sizeFromDescriptor int64, speed int32) error {
    var err error
    for i, backoff := 1, 2*time.Second; i <= retry; i, backoff = i+1, backoff*2 {
        t.logger.Infof("copying the blob %s(the %dth running)...", digest, i)
        if err = t.copyBlob(srcRepo, dstRepo, digest, sizeFromDescriptor); err == nil {
        if err = t.copyBlob(srcRepo, dstRepo, digest, sizeFromDescriptor, speed); err == nil {
            t.logger.Infof("copy the blob %s completed", digest)
            return nil
        }
@@ -278,7 +283,7 @@ func (t *transfer) copyBlobWithRetry(srcRepo, dstRepo, digest string, sizeFromDe

// copy the layer or artifact config from the source registry to destination
// the size parameter is taken from manifests.
func (t *transfer) copyBlob(srcRepo, dstRepo, digest string, sizeFromDescriptor int64) error {
func (t *transfer) copyBlob(srcRepo, dstRepo, digest string, sizeFromDescriptor int64, speed int32) error {
    if t.shouldStop() {
        return errStopped
    }
@@ -311,6 +316,9 @@ func (t *transfer) copyBlob(srcRepo, dstRepo, digest string, sizeFromDescriptor
        t.logger.Errorf("failed to pulling the blob %s: %v", digest, err)
        return err
    }
    if speed > 0 {
        data = trans.NewReader(data, speed)
    }
    defer data.Close()
    // get size 0 from PullBlob, use size from distribution.Descriptor instead.
    if size == 0 {
@@ -318,6 +326,8 @@ func (t *transfer) copyBlob(srcRepo, dstRepo, digest string, sizeFromDescriptor
        t.logger.Debugf("the blob size from remote registry is 0, use size %d from manifests instead", size)
    }

    t.logger.Debugf("the blob size is %d bytes", size)

    if err = t.dst.PushBlob(dstRepo, digest, size, data); err != nil {
        t.logger.Errorf("failed to pushing the blob %s, size %d: %v", digest, size, err)
        return err
@@ -144,8 +144,7 @@ func TestCopy(t *testing.T) {
        repository: "destination",
        tags:       []string{"b1", "b2"},
    }
    override := true
    err := tr.copy(src, dst, override)
    err := tr.copy(src, dst, true, 0)
    require.Nil(t, err)
}
src/controller/replication/transfer/iothrottler.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package transfer

import (
    "fmt"
    "io"
    "time"

    "golang.org/x/time/rate"
)

type reader struct {
    reader  io.ReadCloser
    limiter *rate.Limiter
}

type RateOpts struct {
    Rate float64
}

const KBRATE = 1024 / 8

// NewReader returns a Reader that is rate limited
func NewReader(r io.ReadCloser, kb int32) io.ReadCloser {
    l := rate.NewLimiter(rate.Limit(kb*KBRATE), 1000*1024)
    return &reader{
        reader:  r,
        limiter: l,
    }
}

func (r *reader) Read(buf []byte) (int, error) {
    n, err := r.reader.Read(buf)
    if n <= 0 {
        return n, err
    }
    now := time.Now()
    rv := r.limiter.ReserveN(now, n)
    if !rv.OK() {
        return 0, fmt.Errorf("exceeds limiter's burst")
    }
    delay := rv.DelayFrom(now)
    time.Sleep(delay)
    return n, err
}

func (r *reader) Close() error {
    return r.reader.Close()
}
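The throttled reader above wraps any io.ReadCloser, which is exactly how copyBlob uses it when speed > 0. With KBRATE = 1024/8, a speed value of kb translates to kb*128 bytes per second, so the value is effectively interpreted as kilobits per second even though the log line in copy() prints "kb/s". Below is a minimal, hedged usage sketch; the import path follows the file location shown above, and the payload size, limit value, and printed timing are illustrative. Note that the limiter's large burst (1000*1024 tokens) means a small payload like this one may pass with no visible delay.

package main

import (
    "bytes"
    "fmt"
    "io"
    "time"

    trans "github.com/goharbor/harbor/src/controller/replication/transfer"
)

func main() {
    // 64 KiB of dummy payload standing in for a blob pulled from the source registry.
    payload := bytes.Repeat([]byte("x"), 64*1024)
    rc := io.NopCloser(bytes.NewReader(payload))

    // Wrap the reader the same way copyBlob does when speed > 0.
    limited := trans.NewReader(rc, 512)
    defer limited.Close()

    start := time.Now()
    n, err := io.Copy(io.Discard, limited)
    fmt.Printf("copied %d bytes in %s, err=%v\n", n, time.Since(start), err)
}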
@@ -34,7 +34,7 @@ type Factory func(Logger, StopFunc) (Transfer, error)
// Transfer defines an interface used to transfer the source
// resource to the destination
type Transfer interface {
    Transfer(src *model.Resource, dst *model.Resource) error
    Transfer(src *model.Resource, dst *model.Resource, speed int32) error
}

// Logger defines an interface for logging
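Callers of the interface now pass the limit explicitly on every transfer. A minimal sketch of the new call shape, assumed to live in the same package as the interface so the existing model import applies; the helper name and parameter are illustrative.

// replicateWithLimit forwards a per-replication speed limit to the Transfer
// implementation; kbSpeed <= 0 leaves the copy unthrottled, a positive value
// is threaded down to the rate-limited reader created in copyBlob.
func replicateWithLimit(t Transfer, src, dst *model.Resource, kbSpeed int32) error {
    return t.Transfer(src, dst, kbSpeed)
}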
@@ -108,7 +108,7 @@ func (c *controller) Ensure(ctx context.Context, name string) (bool, int64, erro
        }
        created = true
        return nil
    })(ctx); err != nil {
    })(orm.SetTransactionOpNameToContext(ctx, "tx-repository-ensure")); err != nil {
        // isn't conflict error, return directly
        if !errors.IsConflictErr(err) {
            return false, 0, err
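This change also names the transactions that several controllers run (repository Ensure here, and the scan and tag controllers below). A hedged sketch of the pattern, assuming ctx already carries an ORM; the package name, function body, and the "tx-example" label are illustrative.

package example

import (
    "context"

    "github.com/goharbor/harbor/src/lib/orm"
)

// doAtomically names the transaction via the context, then runs the unit of
// work inside orm.WithTransaction, mirroring the call sites in this diff.
func doAtomically(ctx context.Context) error {
    work := func(ctx context.Context) error {
        // database writes that must commit or roll back together
        return nil
    }
    return orm.WithTransaction(work)(orm.SetTransactionOpNameToContext(ctx, "tx-example"))
}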
@@ -3,6 +3,9 @@ package robot
import (
    "context"
    "fmt"
    "strconv"
    "time"

    rbac_project "github.com/goharbor/harbor/src/common/rbac/project"
    "github.com/goharbor/harbor/src/common/utils"
    "github.com/goharbor/harbor/src/lib/config"
@@ -15,7 +18,6 @@ import (
    rbac_model "github.com/goharbor/harbor/src/pkg/rbac/model"
    robot "github.com/goharbor/harbor/src/pkg/robot"
    "github.com/goharbor/harbor/src/pkg/robot/model"
    "time"
)

var (
@@ -88,7 +90,12 @@ func (d *controller) Create(ctx context.Context, r *Robot) (int64, string, error
        r.Duration = int64(config.RobotTokenDuration(ctx))
        expiresAt = time.Now().AddDate(0, 0, config.RobotTokenDuration(ctx)).Unix()
    } else {
        expiresAt = time.Now().AddDate(0, 0, int(r.Duration)).Unix()
        durationStr := strconv.FormatInt(r.Duration, 10)
        duration, err := strconv.Atoi(durationStr)
        if err != nil {
            return 0, "", err
        }
        expiresAt = time.Now().AddDate(0, 0, duration).Unix()
    }

    pwd := utils.GenerateRandomString()
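The Create path above now converts the robot's Duration (an int64 number of days) to int by formatting it and parsing it back, so a value that does not fit the platform's int surfaces as an error instead of being silently truncated by a direct int(...) cast. A standalone sketch of that behaviour; the sample value is illustrative.

package main

import (
    "fmt"
    "strconv"
)

func main() {
    var days int64 = 9_000_000_000 // larger than a 32-bit int can hold
    s := strconv.FormatInt(days, 10)
    if _, err := strconv.Atoi(s); err != nil {
        // On 32-bit platforms Atoi reports the overflow; on 64-bit it succeeds.
        fmt.Println("duration out of range:", err)
    }
}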
@@ -31,6 +31,11 @@ type Robot struct {
    Permissions []*Permission `json:"permissions"`
}

// IsSysLevel, true is a system level robot, others are project level.
func (r *Robot) IsSysLevel() bool {
    return r.Level == LEVELSYSTEM
}

// setLevel, 0 is a system level robot, others are project level.
func (r *Robot) setLevel() {
    if r.ProjectID == 0 {
@@ -56,6 +61,11 @@ type Permission struct {
    Scope string `json:"-"`
}

// IsCoverAll ...
func (p *Permission) IsCoverAll() bool {
    return p.Scope == SCOPEALLPROJECT
}

// Option ...
type Option struct {
    WithPermission bool
@@ -31,6 +31,24 @@ func (suite *ModelTestSuite) TestSetLevel() {
    suite.Equal(LEVELPROJECT, r.Level)
}

func (suite *ModelTestSuite) TestIsSysLevel() {
    r := Robot{
        Robot: model.Robot{
            ProjectID: 0,
        },
    }
    r.setLevel()
    suite.True(r.IsSysLevel())

    r = Robot{
        Robot: model.Robot{
            ProjectID: 1,
        },
    }
    r.setLevel()
    suite.False(r.IsSysLevel())
}

func (suite *ModelTestSuite) TestSetEditable() {
    r := Robot{
        Robot: model.Robot{
@@ -38,7 +56,7 @@ func (suite *ModelTestSuite) TestSetEditable() {
        },
    }
    r.setEditable()
    suite.Equal(false, r.Editable)
    suite.False(r.Editable)

    r = Robot{
        Robot: model.Robot{
@@ -66,7 +84,29 @@ func (suite *ModelTestSuite) TestSetEditable() {
        },
    }
    r.setEditable()
    suite.Equal(true, r.Editable)
    suite.True(r.Editable)
}

func (suite *ModelTestSuite) TestIsCoverAll() {
    p := &Permission{
        Kind:      "project",
        Namespace: "library",
        Access: []*types.Policy{
            {
                Resource: "repository",
                Action:   "push",
            },
            {
                Resource: "repository",
                Action:   "pull",
            },
        },
        Scope: "/project/*",
    }
    suite.True(p.IsCoverAll())

    p.Scope = "/system"
    suite.False(p.IsCoverAll())
}

func TestModelTestSuite(t *testing.T) {
@@ -28,12 +28,12 @@ import (
    sc "github.com/goharbor/harbor/src/controller/scanner"
    "github.com/goharbor/harbor/src/controller/tag"
    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/lib"
    "github.com/goharbor/harbor/src/lib/config"
    "github.com/goharbor/harbor/src/lib/errors"
    "github.com/goharbor/harbor/src/lib/log"
    "github.com/goharbor/harbor/src/lib/orm"
    "github.com/goharbor/harbor/src/lib/q"
    "github.com/goharbor/harbor/src/lib/retry"
    allowlist "github.com/goharbor/harbor/src/pkg/allowlist/models"
    "github.com/goharbor/harbor/src/pkg/permission/types"
    "github.com/goharbor/harbor/src/pkg/robot/model"
@@ -313,6 +313,24 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
    return nil
}

// Stop scan job of a given artifact
func (bc *basicController) Stop(ctx context.Context, artifact *ar.Artifact) error {
    if artifact == nil {
        return errors.New("nil artifact to stop scan")
    }
    query := q.New(q.KeyWords{"extra_attrs.artifact.digest": artifact.Digest})
    executions, err := bc.execMgr.List(ctx, query)
    if err != nil {
        return err
    }
    if len(executions) == 0 {
        message := fmt.Sprintf("no scan job for artifact digest=%v", artifact.Digest)
        return errors.BadRequestError(nil).WithMessage(message)
    }
    execution := executions[0]
    return bc.execMgr.Stop(ctx, execution.ID)
}

func (bc *basicController) ScanAll(ctx context.Context, trigger string, async bool) (int64, error) {
    executionID, err := bc.execMgr.Create(ctx, VendorTypeScanAll, 0, trigger)
    if err != nil {
@@ -322,7 +340,7 @@ func (bc *basicController) ScanAll(ctx context.Context, trigger string, async bo
    if async {
        go func(ctx context.Context) {
            // if async, this is running in another goroutine ensure the execution exists in db
            err := lib.RetryUntil(func() error {
            err := retry.Retry(func() error {
                _, err := bc.execMgr.Get(ctx, executionID)
                return err
            })
@@ -361,7 +379,7 @@ func (bc *basicController) startScanAll(ctx context.Context, executionID int64)
        return bc.Scan(ctx, artifact, WithExecutionID(executionID))
    }

    if err := orm.WithTransaction(scan)(bc.makeCtx()); err != nil {
    if err := orm.WithTransaction(scan)(orm.SetTransactionOpNameToContext(bc.makeCtx(), "tx-start-scanall")); err != nil {
        // Just logged
        log.Errorf("failed to scan artifact %s, error %v", artifact, err)

@@ -482,7 +500,7 @@ func (bc *basicController) makeReportPlaceholder(ctx context.Context, r *scanner
        return nil
    }

    if err := orm.WithTransaction(create)(ctx); err != nil {
    if err := orm.WithTransaction(create)(orm.SetTransactionOpNameToContext(ctx, "tx-make-report-placeholder")); err != nil {
        return nil, err
    }
@@ -358,6 +358,45 @@ func (suite *ControllerTestSuite) TestScanControllerScan() {
        }
    }
}

// TestScanControllerStop ...
func (suite *ControllerTestSuite) TestScanControllerStop() {
    {
        // artifact not provieded
        suite.Require().Error(suite.c.Stop(context.TODO(), nil))
    }

    {
        // success
        mock.OnAnything(suite.execMgr, "List").Return([]*task.Execution{
            {ExtraAttrs: suite.makeExtraAttrs("rp-uuid-001"), Status: "Running"},
        }, nil).Once()
        mock.OnAnything(suite.execMgr, "Stop").Return(nil).Once()

        ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})

        suite.Require().NoError(suite.c.Stop(ctx, suite.artifact))
    }

    {
        // failed due to no execution returned by List
        mock.OnAnything(suite.execMgr, "List").Return([]*task.Execution{}, nil).Once()
        mock.OnAnything(suite.execMgr, "Stop").Return(nil).Once()

        ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})

        suite.Require().Error(suite.c.Stop(ctx, suite.artifact))
    }

    {
        // failed due to execMgr.List() errored out
        mock.OnAnything(suite.execMgr, "List").Return([]*task.Execution{}, fmt.Errorf("failed to call execMgr.List()")).Once()

        ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})

        suite.Require().Error(suite.c.Stop(ctx, suite.artifact))
    }
}

// TestScanControllerGetReport ...
func (suite *ControllerTestSuite) TestScanControllerGetReport() {
    mock.OnAnything(suite.ar, "Walk").Return(nil).Run(func(args mock.Arguments) {
@@ -50,6 +50,16 @@ type Controller interface {
    //   error : non nil error if any errors occurred
    Scan(ctx context.Context, artifact *artifact.Artifact, options ...Option) error

    // Stop scan job of the given artifact
    //
    //   Arguments:
    //     ctx context.Context : the context for this method
    //     artifact *artifact.Artifact : the artifact whose scan job to be stopped
    //
    //   Returns:
    //     error : non nil error if any errors occurred
    Stop(ctx context.Context, artifact *artifact.Artifact) error

    // GetReport gets the reports for the given artifact identified by the digest
    //
    //   Arguments:
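A hedged sketch of using the new Stop method, assumed to sit in the same package as the Controller so the existing artifact import applies; the helper name is illustrative.

// stopScanForArtifact aborts the scan execution recorded for the artifact; the
// controller returns a BadRequestError when no execution exists for its digest.
func stopScanForArtifact(ctx context.Context, c Controller, art *artifact.Artifact) error {
    return c.Stop(ctx, art)
}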
@@ -16,6 +16,8 @@ package tag

import (
    "context"
    "time"

    "github.com/goharbor/harbor/src/common/utils"
    "github.com/goharbor/harbor/src/lib/errors"
    "github.com/goharbor/harbor/src/lib/log"
@@ -28,7 +30,6 @@ import (
    "github.com/goharbor/harbor/src/pkg/signature"
    "github.com/goharbor/harbor/src/pkg/tag"
    model_tag "github.com/goharbor/harbor/src/pkg/tag/model/tag"
    "time"
)

var (
@@ -115,7 +116,7 @@ func (c *controller) Ensure(ctx context.Context, repositoryID, artifactID int64,
        tag.PushTime = time.Now()
        _, err = c.Create(ctx, tag)
        return err
    })(ctx); err != nil && !errors.IsConflictErr(err) {
    })(orm.SetTransactionOpNameToContext(ctx, "tx-tag-ensure")); err != nil && !errors.IsConflictErr(err) {
        return err
    }
@@ -16,16 +16,15 @@ package user

import (
    "context"
    "fmt"
    "github.com/goharbor/harbor/src/common"
    "github.com/goharbor/harbor/src/lib"
    "github.com/goharbor/harbor/src/pkg/member"

    "github.com/goharbor/harbor/src/common"
    commonmodels "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/security"
    "github.com/goharbor/harbor/src/common/security/local"
    "github.com/goharbor/harbor/src/lib"
    "github.com/goharbor/harbor/src/lib/errors"
    "github.com/goharbor/harbor/src/lib/q"
    "github.com/goharbor/harbor/src/pkg/member"
    "github.com/goharbor/harbor/src/pkg/oidc"
    "github.com/goharbor/harbor/src/pkg/user"
    "github.com/goharbor/harbor/src/pkg/user/models"
@@ -45,29 +44,29 @@ type Controller interface {
    // UpdatePassword ...
    UpdatePassword(ctx context.Context, id int, password string) error
    // List ...
    List(ctx context.Context, query *q.Query) ([]*models.User, error)
    List(ctx context.Context, query *q.Query, options ...models.Option) ([]*commonmodels.User, error)
    // Create ...
    Create(ctx context.Context, u *models.User) (int, error)
    Create(ctx context.Context, u *commonmodels.User) (int, error)
    // Count ...
    Count(ctx context.Context, query *q.Query) (int64, error)
    // Get ...
    Get(ctx context.Context, id int, opt *Option) (*models.User, error)
    Get(ctx context.Context, id int, opt *Option) (*commonmodels.User, error)
    // GetByName gets the user model by username, it only supports getting the basic and does not support opt
    GetByName(ctx context.Context, username string) (*models.User, error)
    GetByName(ctx context.Context, username string) (*commonmodels.User, error)
    // GetBySubIss gets the user model by subject and issuer, the result will contain the basic user model and does not support opt
    GetBySubIss(ctx context.Context, sub, iss string) (*models.User, error)
    GetBySubIss(ctx context.Context, sub, iss string) (*commonmodels.User, error)
    // Delete ...
    Delete(ctx context.Context, id int) error
    // UpdateProfile update the profile based on the ID and data in the model in parm, only a subset of attributes in the model
    // will be update, see the implementation of manager.
    UpdateProfile(ctx context.Context, u *models.User, cols ...string) error
    UpdateProfile(ctx context.Context, u *commonmodels.User, cols ...string) error
    // SetCliSecret sets the OIDC CLI secret for a user
    SetCliSecret(ctx context.Context, id int, secret string) error
    // UpdateOIDCMeta updates the OIDC metadata of a user, if the cols are not provided, by default the field of token and secret will be updated
    UpdateOIDCMeta(ctx context.Context, ou *commonmodels.OIDCUser, cols ...string) error
    // OnboardOIDCUser inserts the record for basic user info and the oidc metadata
    // if the onboard process is successful the input parm of user model will be populated with user id
    OnboardOIDCUser(ctx context.Context, u *models.User) error
    OnboardOIDCUser(ctx context.Context, u *commonmodels.User) error
}

// NewController ...
@@ -98,7 +97,7 @@ func (c *controller) UpdateOIDCMeta(ctx context.Context, ou *commonmodels.OIDCUs
    return c.oidcMetaMgr.Update(ctx, ou, cols...)
}

func (c *controller) OnboardOIDCUser(ctx context.Context, u *models.User) error {
func (c *controller) OnboardOIDCUser(ctx context.Context, u *commonmodels.User) error {
    if u == nil {
        return errors.BadRequestError(nil).WithMessage("user model is nil")
    }
@@ -120,7 +119,7 @@ func (c *controller) OnboardOIDCUser(ctx context.Context, u *models.User) error
    return nil
}

func (c *controller) GetBySubIss(ctx context.Context, sub, iss string) (*models.User, error) {
func (c *controller) GetBySubIss(ctx context.Context, sub, iss string) (*commonmodels.User, error) {
    oidcMeta, err := c.oidcMetaMgr.GetBySubIss(ctx, sub, iss)
    if err != nil {
        return nil, err
@@ -128,7 +127,7 @@ func (c *controller) GetBySubIss(ctx context.Context, sub, iss string) (*models.
    return c.Get(ctx, oidcMeta.UserID, nil)
}

func (c *controller) GetByName(ctx context.Context, username string) (*models.User, error) {
func (c *controller) GetByName(ctx context.Context, username string) (*commonmodels.User, error) {
    return c.mgr.GetByName(ctx, username)
}

@@ -136,23 +135,20 @@ func (c *controller) SetCliSecret(ctx context.Context, id int, secret string) er
    return c.oidcMetaMgr.SetCliSecretByUserID(ctx, id, secret)
}

func (c *controller) Create(ctx context.Context, u *models.User) (int, error) {
func (c *controller) Create(ctx context.Context, u *commonmodels.User) (int, error) {
    return c.mgr.Create(ctx, u)
}

func (c *controller) UpdateProfile(ctx context.Context, u *models.User, cols ...string) error {
func (c *controller) UpdateProfile(ctx context.Context, u *commonmodels.User, cols ...string) error {
    return c.mgr.UpdateProfile(ctx, u, cols...)
}

func (c *controller) Get(ctx context.Context, id int, opt *Option) (*models.User, error) {
func (c *controller) Get(ctx context.Context, id int, opt *Option) (*commonmodels.User, error) {
    u, err := c.mgr.Get(ctx, id)
    if err != nil {
        return nil, err
    }
    sctx, ok := security.FromContext(ctx)
    if !ok {
        return nil, fmt.Errorf("can't find security context")
    }
    sctx, _ := security.FromContext(ctx)
    lsc, ok := sctx.(*local.SecurityContext)
    if ok && lsc.User() != nil && lsc.User().UserID == id {
        u.AdminRoleInAuth = lsc.User().AdminRoleInAuth
@@ -185,8 +181,8 @@ func (c *controller) Delete(ctx context.Context, id int) error {
    return c.mgr.Delete(ctx, id)
}

func (c *controller) List(ctx context.Context, query *q.Query) ([]*models.User, error) {
    return c.mgr.List(ctx, query)
func (c *controller) List(ctx context.Context, query *q.Query, options ...models.Option) ([]*commonmodels.User, error) {
    return c.mgr.List(ctx, query, options...)
}

func (c *controller) UpdatePassword(ctx context.Context, id int, password string) error {
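A hedged sketch of the updated List signature, assumed to sit next to the controller (package user) so the existing q and commonmodels imports apply; the "username" query keyword is an assumption, and passing no options keeps the previous behaviour.

// listByUsername exercises the new variadic signature without any options.
func listByUsername(ctx context.Context, ctl Controller, name string) ([]*commonmodels.User, error) {
    return ctl.List(ctx, q.New(q.KeyWords{"username": name}))
}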
@@ -16,9 +16,11 @@ package usergroup

import (
    "context"

    "github.com/goharbor/harbor/src/common"
    "github.com/goharbor/harbor/src/core/auth"
    "github.com/goharbor/harbor/src/lib/errors"
    "github.com/goharbor/harbor/src/lib/q"
    "github.com/goharbor/harbor/src/pkg/ldap"
    "github.com/goharbor/harbor/src/pkg/usergroup"
    "github.com/goharbor/harbor/src/pkg/usergroup/model"
@@ -44,7 +46,9 @@ type Controller interface {
    // Populate populate user group and get the user group's id
    Populate(ctx context.Context, userGroups []model.UserGroup) ([]int, error)
    // List list user groups
    List(ctx context.Context, userGroup model.UserGroup) ([]*model.UserGroup, error)
    List(ctx context.Context, q *q.Query) ([]*model.UserGroup, error)
    // Count user group count
    Count(ctx context.Context, q *q.Query) (int64, error)
}

type controller struct {
@@ -55,8 +59,8 @@ func newController() Controller {
    return &controller{mgr: usergroup.Mgr}
}

func (c *controller) List(ctx context.Context, userGroup model.UserGroup) ([]*model.UserGroup, error) {
    return c.mgr.List(ctx, userGroup)
func (c *controller) List(ctx context.Context, query *q.Query) ([]*model.UserGroup, error) {
    return c.mgr.List(ctx, query)
}

func (c *controller) Populate(ctx context.Context, userGroups []model.UserGroup) ([]int, error) {
@@ -72,7 +76,7 @@ func (c *controller) Delete(ctx context.Context, id int) error {
}

func (c *controller) Update(ctx context.Context, id int, groupName string) error {
    ug, err := c.mgr.List(ctx, model.UserGroup{ID: id})
    ug, err := c.mgr.List(ctx, q.New(q.KeyWords{"ID": id}))
    if err != nil {
        return err
    }
@@ -84,7 +88,7 @@ func (c *controller) Update(ctx context.Context, id int, groupName string) error

func (c *controller) Create(ctx context.Context, group model.UserGroup) (int, error) {
    if group.GroupType == common.LDAPGroupType {
        ldapGroup, err := auth.SearchGroup(group.LdapGroupDN)
        ldapGroup, err := auth.SearchGroup(ctx, group.LdapGroupDN)
        if err == ldap.ErrNotFound || ldapGroup == nil {
            return 0, errors.BadRequestError(nil).WithMessage("LDAP Group DN is not found: DN:%v", group.LdapGroupDN)
        }
@@ -109,3 +113,7 @@ func (c *controller) Create(ctx context.Context, group model.UserGroup) (int, er
func (c *controller) Get(ctx context.Context, id int) (*model.UserGroup, error) {
    return c.mgr.Get(ctx, id)
}

func (c *controller) Count(ctx context.Context, query *q.Query) (int64, error) {
    return c.mgr.Count(ctx, query)
}
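User groups are now listed with a *q.Query instead of a model.UserGroup filter. A minimal sketch, assumed to sit next to the controller (package usergroup) so the q and model imports apply; the "ID" keyword mirrors the Update method above, while filtering on other columns is an assumption.

// getByID looks a group up through the new query-based List.
func getByID(ctx context.Context, ctl Controller, id int) (*model.UserGroup, error) {
    groups, err := ctl.List(ctx, q.New(q.KeyWords{"ID": id}))
    if err != nil {
        return nil, err
    }
    if len(groups) == 0 {
        return nil, nil // not found
    }
    return groups[0], nil
}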
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !darwin

package api

import (
Some files were not shown because too many files have changed in this diff.