Merge branch 'master' into official-wehook-events-20190811

mmpei 2019-08-22 22:07:12 -05:00 committed by GitHub
commit d5f87063e4
258 changed files with 6105 additions and 1377 deletions

View File

@ -28,7 +28,7 @@ env:
- POSTGRESQL_PWD: root123
- POSTGRESQL_DATABASE: registry
- ADMINSERVER_URL: http://127.0.0.1:8888
- DOCKER_COMPOSE_VERSION: 1.22.0
- DOCKER_COMPOSE_VERSION: 1.23.0
- HARBOR_ADMIN: admin
- HARBOR_ADMIN_PASSWD: Harbor12345
- CORE_SECRET: tempString

View File

@ -100,7 +100,7 @@ PREPARE_VERSION_NAME=versions
REGISTRYVERSION=v2.7.1-patch-2819
NGINXVERSION=$(VERSIONTAG)
NOTARYVERSION=v0.6.1
CLAIRVERSION=v2.0.8
CLAIRVERSION=v2.0.9
CLAIRDBVERSION=$(VERSIONTAG)
MIGRATORVERSION=$(VERSIONTAG)
REDISVERSION=$(VERSIONTAG)

View File

@ -49,7 +49,7 @@ Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CN
**System requirements:**
**On a Linux host:** docker 17.06.0-ce+ and docker-compose 1.18.0+ .
**On a Linux host:** docker 17.06.0-ce+ and docker-compose 1.23.0+ .
Download binaries of **[Harbor release ](https://github.com/vmware/harbor/releases)** and follow **[Installation & Configuration Guide](docs/installation_guide.md)** to install Harbor.

View File

@ -4,14 +4,13 @@ This guide provides instructions for developers to build and run Harbor from sou
## Step 1: Prepare for a build environment for Harbor
Harbor is deployed as several Docker containers and most of the code is written in Go language. The build environment requires Python, Docker, Docker Compose and golang development environment. Please install the below prerequisites:
Harbor is deployed as several Docker containers and most of the code is written in Go language. The build environment requires Docker, Docker Compose and golang development environment. Please install the below prerequisites:
Software | Required Version
----------------------|--------------------------
docker | 17.05 +
docker-compose | 1.11.0 +
python | 2.7 +
docker-compose | 1.23.0 +
git | 1.9.1 +
make | 3.81 +
golang* | 1.7.3 +

View File

@ -31,7 +31,7 @@ Harbor is deployed as several Docker containers, and, therefore, can be deployed
|Software|Version|Description|
|---|---|---|
|Docker engine|version 17.06.0-ce+ or higher|For installation instructions, please refer to: [docker engine doc](https://docs.docker.com/engine/installation/)|
|Docker Compose|version 1.18.0 or higher|For installation instructions, please refer to: [docker compose doc](https://docs.docker.com/compose/install/)|
|Docker Compose|version 1.23.0 or higher|For installation instructions, please refer to: [docker compose doc](https://docs.docker.com/compose/install/)|
|Openssl|latest is preferred|Generate certificate and keys for Harbor|
### Network ports

View File

@ -4,7 +4,7 @@ This guide provides instructions to manage roles by LDAP/AD group. You can impor
## Prerequisite
1. Harbor's auth_mode is ldap_auth and **[basic LDAP configure paremters](https://github.com/vmware/harbor/blob/master/docs/installation_guide.md#optional-parameters)** are configured.
1. Harbor's auth_mode is ldap_auth and **[basic LDAP configure parameters](https://github.com/vmware/harbor/blob/master/docs/installation_guide.md#optional-parameters)** are configured.
1. Memberof overlay
This feature requires the LDAP/AD server enabled the feature **memberof overlay**.

View File

@ -122,8 +122,6 @@ paths:
responses:
'200':
description: Project name exists.
'401':
description: User need to log in first.
'404':
description: Project name does not exist.
'500':
@ -333,10 +331,10 @@ paths:
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'404':
description: Project ID does not exist.
'403':
description: User does not have permission to get summary of the project.
'404':
description: Project ID does not exist.
'500':
description: Unexpected internal errors.
'/projects/{project_id}/metadatas':
@ -1263,11 +1261,16 @@ paths:
type: string
required: true
description: Relevant repository name.
- name: label_ids
- name: label_id
in: query
type: string
required: false
description: A list of comma separated label IDs.
description: A label ID.
- name: detail
in: query
type: boolean
required: false
description: Bool value indicating whether to return detailed information of the tag, such as vulnerability scan info. If set to false, only the tag name is returned.
tags:
- Products
responses:
@ -2380,10 +2383,10 @@ paths:
$ref: '#/definitions/Namespace'
'401':
description: User need to login first.
'404':
description: No registry found.
'403':
description: User has no privilege for the operation.
'404':
description: No registry found.
'500':
description: Unexpected internal errors.
/internal/syncregistry:
@ -2404,6 +2407,20 @@ paths:
$ref: '#/responses/UnsupportedMediaType'
'500':
description: Unexpected internal errors.
/internal/syncquota:
post:
summary: Sync quota from registry/chart to DB.
description: |
This endpoint is for syncing quota usage of registry/chart with database.
tags:
- Products
responses:
'200':
description: Sync quota successfully.
'401':
description: User need to log in first.
'403':
description: User does not have permission of system admin role.
/systeminfo:
get:
summary: Get general system info
@ -3684,7 +3701,7 @@ paths:
description: Unexpected internal errors.
'/projects/{project_id}/webhook/policies':
get:
sumary: List project webhook policies.
summary: List project webhook policies.
description: |
This endpoint returns webhook policies of a project.
parameters:
@ -3712,7 +3729,7 @@ paths:
'500':
description: Unexpected internal errors.
post:
sumary: Create project webhook policy.
summary: Create project webhook policy.
description: |
This endpoint create a webhook policy if the project does not have one.
parameters:
@ -3757,7 +3774,7 @@ paths:
in: path
description: The id of webhook policy.
required: true
type: int64
type: integer
format: int64
tags:
- Products
@ -3791,7 +3808,7 @@ paths:
in: path
description: The id of webhook policy.
required: true
type: int64
type: integer
format: int64
- name: policy
in: body
@ -3829,7 +3846,7 @@ paths:
in: path
description: The id of webhook policy.
required: true
type: int64
type: integer
format: int64
tags:
- Products
@ -3908,7 +3925,7 @@ paths:
description: Internal server errors.
'/projects/{project_id}/webhook/jobs':
get:
sumary: List project webhook jobs
summary: List project webhook jobs
description: |
This endpoint returns webhook jobs of a project.
parameters:
@ -4023,6 +4040,9 @@ definitions:
metadata:
description: The metadata of the project.
$ref: '#/definitions/ProjectMetadata'
cve_whitelist:
description: The CVE whitelist of the project.
$ref: '#/definitions/CVEWhitelist'
count_limit:
type: integer
format: int64
@ -4083,16 +4103,20 @@ definitions:
description: 'The public status of the project. The valid values are "true", "false".'
enable_content_trust:
type: string
description: 'Whether content trust is enabled or not. If it is enabled, user cann''t pull unsigned images from this project. The valid values are "true", "false".'
description: 'Whether content trust is enabled or not. If it is enabled, user can''t pull unsigned images from this project. The valid values are "true", "false".'
prevent_vul:
type: string
description: 'Whether prevent the vulnerable images from running. The valid values are "true", "false".'
severity:
type: string
description: 'If the vulnerability is high than severity defined here, the images cann''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".'
description: 'If the vulnerability is higher than the severity defined here, the images can''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".'
auto_scan:
type: string
description: 'Whether scan images automatically when pushing. The valid values are "true", "false".'
reuse_sys_cve_whitelist:
type: string
description: 'Whether this project reuses the system level CVE whitelist as its own whitelist. The valid values are "true", "false".
If it is set to "true", the actual whitelist associated with this project, if any, will be ignored.'
ProjectSummary:
type: object
properties:
@ -4841,6 +4865,9 @@ definitions:
project_creation_restriction:
type: string
description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly".
quota_per_project_enable:
type: boolean
description: This attribute indicates whether quota per project is enabled in Harbor
read_only:
type: boolean
description: '''docker push'' is prohibited by Harbor if you set it to true. '
@ -4938,6 +4965,9 @@ definitions:
project_creation_restriction:
$ref: '#/definitions/StringConfigItem'
description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly".
quota_per_project_enable:
$ref: '#/definitions/BoolConfigItem'
description: This attribute indicates whether quota per project is enabled in Harbor
read_only:
$ref: '#/definitions/BoolConfigItem'
description: '''docker push'' is prohibited by Harbor if you set it to true. '
@ -5349,7 +5379,9 @@ definitions:
properties:
type:
type: string
description: The schedule type. The valid values are hourly, daily weekly, custom and None. 'None' means to cancel the schedule.
description: |
The schedule type. The valid values are 'Hourly', 'Daily', 'Weekly', 'Custom', 'Manually' and 'None'.
'Manually' means to trigger it right away and 'None' means to cancel the schedule.
cron:
type: string
description: A cron expression, a time-based job scheduler.
@ -5724,7 +5756,7 @@ definitions:
description: The webhook job ID.
policy_id:
type: integer
fromat: int64
format: int64
description: The webhook policy ID.
event_type:
type: string

View File

@ -30,6 +30,11 @@ harbor_admin_password: Harbor12345
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: root123
# The maximum number of connections in the idle connection pool. If it is <= 0, no idle connections are retained.
max_idle_conns: 50
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 100 for postgres.
max_open_conns: 100
# The default data volume
data_volume: /data
@ -50,18 +55,12 @@ data_volume: /data
# disabled: false
# Clair configuration
clair:
clair:
# The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
updaters_interval: 12
# Config http proxy for Clair, e.g. http://my.proxy.com:3128
# Clair doesn't need to connect to harbor internal components via http proxy.
http_proxy:
https_proxy:
no_proxy: 127.0.0.1,localhost,core,registry
jobservice:
# Maximum number of job workers in job service
# Maximum number of job workers in job service
max_job_workers: 10
notification:
@ -80,8 +79,8 @@ log:
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that store log
@ -97,7 +96,7 @@ log:
# port: 5140
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 1.8.0
_version: 1.9.0
# Uncomment external_database if using external database.
# external_database:
@ -143,3 +142,20 @@ _version: 1.8.0
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
# ca_file: /path/to/ca
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components don't need to connect to each other via http proxy.
# Remove a component from the `components` array if you want to disable the proxy
# for it. If you want to use a proxy for replication, you MUST enable the proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add a domain to the `no_proxy` field when you want to disable the proxy
# for some special registry.
proxy:
http_proxy:
https_proxy:
no_proxy: 127.0.0.1,localhost,.local,.internal,log,db,redis,nginx,core,portal,postgresql,jobservice,registry,registryctl,clair
components:
- core
- jobservice
- clair
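The new `max_idle_conns` / `max_open_conns` options above map onto Go's standard `database/sql` connection-pool settings, which the core service passes through to its ORM (see the `PostGreSQL` config changes later in this diff). A minimal, self-contained sketch of what the two numbers control; the DSN and driver import here are placeholders, not Harbor's actual wiring:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // placeholder Postgres driver for this sketch
)

func main() {
	// Placeholder DSN; Harbor builds its own from the harbor.yml values.
	db, err := sql.Open("postgres", "host=127.0.0.1 port=5432 user=postgres dbname=registry sslmode=disable")
	if err != nil {
		panic(err)
	}
	// max_idle_conns: connections kept in the idle pool; <= 0 keeps none.
	db.SetMaxIdleConns(50)
	// max_open_conns: upper bound on open connections; <= 0 means unlimited.
	db.SetMaxOpenConns(100)
	fmt.Printf("pool limit: %d\n", db.Stats().MaxOpenConnections)
}
```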

View File

@ -117,7 +117,7 @@ function check_docker {
function check_dockercompose {
if ! docker-compose --version &> /dev/null
then
error "Need to install docker-compose(1.18.0+) by yourself first and run this script again."
error "Need to install docker-compose(1.23.0+) by yourself first and run this script again."
exit 1
fi
@ -129,9 +129,9 @@ function check_dockercompose {
docker_compose_version_part2=${BASH_REMATCH[3]}
# the version of docker-compose does not meet the requirement
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 18 ])
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 23 ])
then
error "Need to upgrade docker-compose package to 1.18.0+."
error "Need to upgrade docker-compose package to 1.23.0+."
exit 1
else
note "docker-compose version: $docker_compose_version"

View File

@ -56,9 +56,9 @@ $$;
CREATE TRIGGER harbor_user_update_time_at_modtime BEFORE UPDATE ON harbor_user FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', 'admin@example.com', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
insert into harbor_user (username, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
('anonymous', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
create table project (
project_id SERIAL PRIMARY KEY NOT NULL,

View File

@ -86,6 +86,7 @@ CREATE TABLE quota_usage
UNIQUE (reference, reference_id)
);
/* only set quota and usage for 'library', and let the sync quota handle the others. */
INSERT INTO quota (reference, reference_id, hard, creation_time, update_time)
SELECT 'project',
CAST(project_id AS VARCHAR),
@ -93,7 +94,7 @@ SELECT 'project',
NOW(),
NOW()
FROM project
WHERE deleted = 'f';
WHERE name = 'library' and deleted = 'f';
INSERT INTO quota_usage (id, reference, reference_id, used, creation_time, update_time)
SELECT id,
@ -131,6 +132,8 @@ create table retention_task
repository varchar(255),
job_id varchar(64),
status varchar(32),
status_code integer,
status_revision integer,
start_time timestamp default CURRENT_TIMESTAMP,
end_time timestamp default CURRENT_TIMESTAMP,
total integer,

View File

@ -1,6 +1,6 @@
FROM photon:2.0
RUN tdnf install sudo -y >> /dev/null\
RUN tdnf install sudo tzdata -y >> /dev/null \
&& tdnf clean all \
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor \
&& mkdir /harbor/

View File

@ -1,6 +1,6 @@
FROM photon:2.0
RUN tdnf install sudo -y >> /dev/null\
RUN tdnf install sudo tzdata -y >> /dev/null \
&& tdnf clean all \
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor

View File

@ -1,7 +1,8 @@
FROM node:10.15.0 as nodeportal
COPY src/portal /portal_src
COPY ./docs/swagger.yaml /portal_src
COPY ./docs/swagger.yaml /portal_src
COPY ./LICENSE /portal_src
WORKDIR /build_dir
@ -21,6 +22,7 @@ FROM photon:2.0
COPY --from=nodeportal /build_dir/dist /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.yaml /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.json /usr/share/nginx/html
COPY --from=nodeportal /build_dir/LICENSE /usr/share/nginx/html
COPY make/photon/portal/nginx.conf /etc/nginx/nginx.conf

View File

@ -12,11 +12,12 @@ REDIS_UID = 999
REDIS_GID = 999
## Global variable
host_root_dir = '/hostfs'
base_dir = '/harbor_make'
templates_dir = "/usr/src/app/templates"
config_dir = '/config'
data_dir = '/data'
secret_dir = '/secret'
secret_key_dir='/secret/keys'

View File

@ -1,3 +1,3 @@
http_proxy={{clair_http_proxy}}
https_proxy={{clair_https_proxy}}
no_proxy={{clair_no_proxy}}
HTTP_PROXY={{clair_http_proxy}}
HTTPS_PROXY={{clair_https_proxy}}
NO_PROXY={{clair_no_proxy}}

View File

@ -15,6 +15,8 @@ POSTGRESQL_USERNAME={{harbor_db_username}}
POSTGRESQL_PASSWORD={{harbor_db_password}}
POSTGRESQL_DATABASE={{harbor_db_name}}
POSTGRESQL_SSLMODE={{harbor_db_sslmode}}
POSTGRESQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}}
POSTGRESQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}}
REGISTRY_URL={{registry_url}}
TOKEN_SERVICE_URL={{token_service_url}}
HARBOR_ADMIN_PASSWORD={{harbor_admin_password}}
@ -41,3 +43,7 @@ RELOAD_KEY={{reload_key}}
CHART_REPOSITORY_URL={{chart_repository_url}}
REGISTRY_CONTROLLER_URL={{registry_controller_url}}
WITH_CHARTMUSEUM={{with_chartmuseum}}
HTTP_PROXY={{core_http_proxy}}
HTTPS_PROXY={{core_https_proxy}}
NO_PROXY={{core_no_proxy}}

View File

@ -276,12 +276,7 @@ services:
volumes:
- ./common/config/nginx:/etc/nginx:z
{% if protocol == 'https' %}
- type: bind
source: {{cert_key_path}}
target: /etc/cert/server.key
- type: bind
source: {{cert_path}}
target: /etc/cert/server.crt
- {{data_volume}}/secret/cert:/etc/cert:z
{% endif %}
networks:
- harbor

View File

@ -2,3 +2,7 @@ CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}
CORE_URL={{core_url}}
JOBSERVICE_WEBHOOK_JOB_MAX_RETRY={{notification_webhook_job_max_retry}}
HTTP_PROXY={{jobservice_http_proxy}}
HTTPS_PROXY={{jobservice_https_proxy}}
NO_PROXY={{jobservice_no_proxy}}

View File

@ -112,6 +112,11 @@ def parse_yaml_config(config_file_path):
config_dict['harbor_db_username'] = 'postgres'
config_dict['harbor_db_password'] = db_configs.get("password") or ''
config_dict['harbor_db_sslmode'] = 'disable'
default_max_idle_conns = 2 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns
default_max_open_conns = 0 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxOpenConns
config_dict['harbor_db_max_idle_conns'] = db_configs.get("max_idle_conns") or default_max_idle_conns
config_dict['harbor_db_max_open_conns'] = db_configs.get("max_open_conns") or default_max_open_conns
# clari db
config_dict['clair_db_host'] = 'postgresql'
config_dict['clair_db_port'] = 5432
@ -171,13 +176,18 @@ def parse_yaml_config(config_file_path):
if storage_config.get('redirect'):
config_dict['storage_redirect_disabled'] = storage_config['redirect']['disabled']
# Global proxy configs
proxy_config = configs.get('proxy') or {}
proxy_components = proxy_config.get('components') or []
for proxy_component in proxy_components:
config_dict[proxy_component + '_http_proxy'] = proxy_config.get('http_proxy') or ''
config_dict[proxy_component + '_https_proxy'] = proxy_config.get('https_proxy') or ''
config_dict[proxy_component + '_no_proxy'] = proxy_config.get('no_proxy') or '127.0.0.1,localhost,core,registry'
# Clair configs, optional
clair_configs = configs.get("clair") or {}
config_dict['clair_db'] = 'postgres'
config_dict['clair_updaters_interval'] = clair_configs.get("updaters_interval") or 12
config_dict['clair_http_proxy'] = clair_configs.get('http_proxy') or ''
config_dict['clair_https_proxy'] = clair_configs.get('https_proxy') or ''
config_dict['clair_no_proxy'] = clair_configs.get('no_proxy') or '127.0.0.1,localhost,core,registry'
# Chart configs
chart_configs = configs.get("chart") or {}
@ -286,4 +296,4 @@ def parse_yaml_config(config_file_path):
# UAA configs
config_dict['uaa'] = configs.get('uaa') or {}
return config_dict
return config_dict

View File

@ -13,7 +13,7 @@ def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum):
VERSION_TAG = versions.get('VERSION_TAG') or 'dev'
REGISTRY_VERSION = versions.get('REGISTRY_VERSION') or 'v2.7.1'
NOTARY_VERSION = versions.get('NOTARY_VERSION') or 'v0.6.1'
CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.7'
CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.9'
CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.9.0'
rendering_variables = {

View File

@ -2,11 +2,13 @@ import os, shutil
from fnmatch import fnmatch
from pathlib import Path
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from g import config_dir, templates_dir, host_root_dir, DEFAULT_GID, DEFAULT_UID, data_dir
from utils.misc import prepare_dir, mark_file
from utils.jinja import render_jinja
from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH
host_ngx_real_cert_dir = Path(os.path.join(data_dir, 'secret', 'cert'))
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
nginx_confd_dir = os.path.join(config_dir, "nginx", "conf.d")
nginx_https_conf_template = os.path.join(templates_dir, "nginx", "nginx.https.conf.jinja")
@ -20,8 +22,38 @@ def prepare_nginx(config_dict):
prepare_dir(nginx_confd_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
render_nginx_template(config_dict)
def prepare_nginx_certs(cert_key_path, cert_path):
"""
Prepare the certs file with proper ownership
1. Remove nginx cert files in secret dir
2. Copy cert files on host filesystem to secret dir
3. Change the permission to 644 and ownership to 10000:10000
"""
host_ngx_cert_key_path = Path(os.path.join(host_root_dir, cert_key_path.lstrip('/')))
host_ngx_cert_path = Path(os.path.join(host_root_dir, cert_path.lstrip('/')))
if host_ngx_real_cert_dir.exists() and host_ngx_real_cert_dir.is_dir():
shutil.rmtree(host_ngx_real_cert_dir)
os.makedirs(host_ngx_real_cert_dir, mode=0o755)
real_key_path = os.path.join(host_ngx_real_cert_dir, 'server.key')
real_crt_path = os.path.join(host_ngx_real_cert_dir, 'server.crt')
shutil.copy2(host_ngx_cert_key_path, real_key_path)
shutil.copy2(host_ngx_cert_path, real_crt_path)
os.chown(host_ngx_real_cert_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
mark_file(real_key_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
mark_file(real_crt_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
def render_nginx_template(config_dict):
if config_dict['protocol'] == "https":
"""
1. render nginx config file through protocol
2. copy additional configs to cert.d dir
"""
if config_dict['protocol'] == 'https':
prepare_nginx_certs(config_dict['cert_key_path'], config_dict['cert_path'])
render_jinja(
nginx_https_conf_template,
nginx_conf,
@ -30,12 +62,7 @@ def render_nginx_template(config_dict):
ssl_cert=SSL_CERT_PATH,
ssl_cert_key=SSL_CERT_KEY_PATH)
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
cert_dir = Path(os.path.join(config_dir, 'cert'))
ssl_key_path = Path(os.path.join(cert_dir, 'server.key'))
ssl_crt_path = Path(os.path.join(cert_dir, 'server.crt'))
cert_dir.mkdir(parents=True, exist_ok=True)
ssl_key_path.touch()
ssl_crt_path.touch()
else:
render_jinja(
nginx_http_conf_template,
@ -45,22 +72,23 @@ def render_nginx_template(config_dict):
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP
copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern)
def add_additional_location_config(src, dst):
"""
These conf files is used for user that wanna add additional customized locations to harbor proxy
:params src: source of the file
:params dst: destination file path
"""
if not os.path.isfile(src):
return
print("Copying nginx configuration file {src} to {dst}".format(
src=src, dst=dst))
shutil.copy2(src, dst)
mark_file(dst, mode=0o644)
def copy_nginx_location_configs_if_exist(src_config_dir, dst_config_dir, filename_pattern):
if not os.path.exists(src_config_dir):
return
def add_additional_location_config(src, dst):
"""
These conf files is used for user that wanna add additional customized locations to harbor proxy
:params src: source of the file
:params dst: destination file path
"""
if not os.path.isfile(src):
return
print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst))
shutil.copy2(src, dst)
mark_file(dst, mode=0o644)
map(lambda filename: add_additional_location_config(
os.path.join(src_config_dir, filename),
os.path.join(dst_config_dir, filename)),

View File

@ -9,6 +9,13 @@ registry_config_dir = os.path.join(config_dir, "registry")
registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja")
registry_conf = os.path.join(config_dir, "registry", "config.yml")
levels_map = {
'debug': 'debug',
'info': 'info',
'warning': 'warn',
'error': 'error',
'fatal': 'fatal'
}
def prepare_registry(config_dict):
prepare_dir(registry_config_dir)
@ -22,6 +29,7 @@ def prepare_registry(config_dict):
registry_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
level=levels_map[config_dict['log_level']],
storage_provider_info=storage_provider_info,
**config_dict)

View File

@ -1,8 +1,8 @@
#!/bin/bash
set +e
# If compling source code this dir is harbor's make dir
# If install harbor via pacakge, this dir is harbor's root dir
# If compiling source code this dir is harbor's make dir.
# If installing harbor via package, this dir is harbor's root dir.
if [[ -n "$HARBOR_BUNDLE_DIR" ]]; then
harbor_prepare_path=$HARBOR_BUNDLE_DIR
else
@ -50,6 +50,7 @@ docker run --rm -v $input_dir:/input:z \
-v $harbor_prepare_path:/compose_location:z \
-v $config_dir:/config:z \
-v $secret_dir:/secret:z \
-v /:/hostfs:z \
goharbor/prepare:dev $@
echo "Clean up the input dir"

View File

@ -210,12 +210,14 @@ func (c *CfgManager) GetDatabaseCfg() *models.Database {
return &models.Database{
Type: c.Get(common.DatabaseType).GetString(),
PostGreSQL: &models.PostGreSQL{
Host: c.Get(common.PostGreSQLHOST).GetString(),
Port: c.Get(common.PostGreSQLPort).GetInt(),
Username: c.Get(common.PostGreSQLUsername).GetString(),
Password: c.Get(common.PostGreSQLPassword).GetString(),
Database: c.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(),
Host: c.Get(common.PostGreSQLHOST).GetString(),
Port: c.Get(common.PostGreSQLPort).GetInt(),
Username: c.Get(common.PostGreSQLUsername).GetString(),
Password: c.Get(common.PostGreSQLPassword).GetString(),
Database: c.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(),
MaxIdleConns: c.Get(common.PostGreSQLMaxIdleConns).GetInt(),
MaxOpenConns: c.Get(common.PostGreSQLMaxOpenConns).GetInt(),
},
}
}

View File

@ -116,6 +116,8 @@ var (
{Name: common.PostGreSQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false},
{Name: common.PostGreSQLSSLMode, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false},
{Name: common.PostGreSQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false},
{Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false},
{Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
@ -151,6 +153,7 @@ var (
{Name: common.RobotTokenDuration, Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true},
{Name: common.NotificationEnable, Scope: UserScope, Group: BasicGroup, EnvKey: "NOTIFICATION_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
{Name: common.QuotaPerProjectEnable, Scope: UserScope, Group: QuotaGroup, EnvKey: "QUOTA_PER_PROJECT_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
{Name: common.CountPerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "COUNT_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
{Name: common.StoragePerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "STORAGE_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
}

View File

@ -53,6 +53,8 @@ const (
PostGreSQLPassword = "postgresql_password"
PostGreSQLDatabase = "postgresql_database"
PostGreSQLSSLMode = "postgresql_sslmode"
PostGreSQLMaxIdleConns = "postgresql_max_idle_conns"
PostGreSQLMaxOpenConns = "postgresql_max_open_conns"
SelfRegistration = "self_registration"
CoreURL = "core_url"
CoreLocalURL = "core_local_url"
@ -147,7 +149,9 @@ const (
// Global notification enable configuration
NotificationEnable = "notification_enable"
// Quota setting items for project
CountPerProject = "count_per_project"
StoragePerProject = "storage_per_project"
QuotaPerProjectEnable = "quota_per_project_enable"
CountPerProject = "count_per_project"
StoragePerProject = "storage_per_project"
)

View File

@ -58,6 +58,7 @@ func UpdateArtifactPullTime(af *models.Artifact) error {
// DeleteArtifact ...
func DeleteArtifact(id int64) error {
_, err := GetOrmer().QueryTable(&models.Artifact{}).Filter("ID", id).Delete()
return err
}

View File

@ -121,12 +121,16 @@ func getDatabase(database *models.Database) (db Database, err error) {
switch database.Type {
case "", "postgresql":
db = NewPGSQL(database.PostGreSQL.Host,
db = NewPGSQL(
database.PostGreSQL.Host,
strconv.Itoa(database.PostGreSQL.Port),
database.PostGreSQL.Username,
database.PostGreSQL.Password,
database.PostGreSQL.Database,
database.PostGreSQL.SSLMode)
database.PostGreSQL.SSLMode,
database.PostGreSQL.MaxIdleConns,
database.PostGreSQL.MaxOpenConns,
)
default:
err = fmt.Errorf("invalid database: %s", database.Type)
}
@ -139,6 +143,8 @@ var once sync.Once
// GetOrmer :set ormer singleton
func GetOrmer() orm.Ormer {
once.Do(func() {
// override the default value(1000) to return all records when setting no limit
orm.DefaultRowsLimit = -1
globalOrm = orm.NewOrm()
})
return globalOrm

View File

@ -78,10 +78,15 @@ func GetBlobsByArtifact(artifactDigest string) ([]*models.Blob, error) {
// GetExclusiveBlobs returns layers of repository:tag which are not shared with other repositories in the project
func GetExclusiveBlobs(projectID int64, repository, digest string) ([]*models.Blob, error) {
var exclusive []*models.Blob
blobs, err := GetBlobsByArtifact(digest)
if err != nil {
return nil, err
}
if len(blobs) == 0 {
return exclusive, nil
}
sql := fmt.Sprintf(`
SELECT
@ -103,13 +108,11 @@ FROM
)
) AS a
LEFT JOIN artifact_blob b ON a.digest = b.digest_af
AND b.digest_blob IN (%s)`, paramPlaceholder(len(blobs)-1))
AND b.digest_blob IN (%s)`, ParamPlaceholderForIn(len(blobs)))
params := []interface{}{projectID, repository, projectID, digest}
for _, blob := range blobs {
if blob.Digest != digest {
params = append(params, blob.Digest)
}
params = append(params, blob.Digest)
}
var rows []struct {
@ -125,9 +128,8 @@ FROM
shared[row.Digest] = true
}
var exclusive []*models.Blob
for _, blob := range blobs {
if blob.Digest != digest && !shared[blob.Digest] {
if !shared[blob.Digest] {
exclusive = append(exclusive, blob)
}
}

View File

@ -133,30 +133,32 @@ func (suite *GetExclusiveBlobsSuite) mustPrepareImage(projectID int64, projectNa
func (suite *GetExclusiveBlobsSuite) TestInSameRepository() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
digest3 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
suite.Len(blobs, 3)
}
manifest2 := suite.mustPrepareImage(projectID, projectName, "mysql", "8.0", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 2)
suite.Len(blobs, 3)
}
manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 0)
suite.Len(blobs, 1)
suite.Equal(manifest1, blobs[0].Digest)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 0)
suite.Len(blobs, 1)
suite.Equal(manifest2, blobs[0].Digest)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
suite.Len(blobs, 1)
suite.Equal(digest3, blobs[0].Digest)
suite.Len(blobs, 2)
}
})
}
@ -169,7 +171,7 @@ func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() {
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
suite.Len(blobs, 3)
}
manifest2 := suite.mustPrepareImage(projectID, projectName, "mariadb", "latest", digest1, digest2)
@ -188,8 +190,7 @@ func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
suite.Len(blobs, 1)
suite.Equal(digest3, blobs[0].Digest)
suite.Len(blobs, 2)
}
})
}
@ -201,16 +202,16 @@ func (suite *GetExclusiveBlobsSuite) TestInDifferentProjects() {
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
suite.Len(blobs, 3)
}
withProject(func(id int64, name string) {
manifest2 := suite.mustPrepareImage(id, name, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
suite.Len(blobs, 3)
}
if blobs, err := GetExclusiveBlobs(id, name+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 2)
suite.Len(blobs, 3)
}
})

View File

@ -21,6 +21,14 @@ import (
"github.com/goharbor/harbor/src/common/utils/log"
)
// CreateCVEWhitelist creates the CVE whitelist
func CreateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
o := GetOrmer()
itemsBytes, _ := json.Marshal(l.Items)
l.ItemsText = string(itemsBytes)
return o.Insert(&l)
}
// UpdateCVEWhitelist Updates the vulnerability white list to DB
func UpdateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
o := GetOrmer()
@ -30,23 +38,6 @@ func UpdateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
return id, err
}
// GetSysCVEWhitelist Gets the system level vulnerability white list from DB
func GetSysCVEWhitelist() (*models.CVEWhitelist, error) {
return GetCVEWhitelist(0)
}
// UpdateSysCVEWhitelist updates the system level CVE whitelist
/*
func UpdateSysCVEWhitelist(l models.CVEWhitelist) error {
if l.ProjectID != 0 {
return fmt.Errorf("system level CVE whitelist cannot set project ID")
}
l.ProjectID = -1
_, err := UpdateCVEWhitelist(l)
return err
}
*/
// GetCVEWhitelist Gets the CVE whitelist of the project based on the project ID in parameter
func GetCVEWhitelist(pid int64) (*models.CVEWhitelist, error) {
o := GetOrmer()
@ -58,8 +49,7 @@ func GetCVEWhitelist(pid int64) (*models.CVEWhitelist, error) {
return nil, fmt.Errorf("failed to get CVE whitelist for project %d, error: %v", pid, err)
}
if len(r) == 0 {
log.Infof("No CVE whitelist found for project %d, returning empty list.", pid)
return &models.CVEWhitelist{ProjectID: pid, Items: []models.CVEWhitelistItem{}}, nil
return nil, nil
} else if len(r) > 1 {
log.Infof("Multiple CVE whitelists found for project %d, length: %d, returning first element.", pid, len(r))
}

View File

@ -23,12 +23,9 @@ import (
func TestUpdateAndGetCVEWhitelist(t *testing.T) {
require.Nil(t, ClearTable("cve_whitelist"))
l, err := GetSysCVEWhitelist()
assert.Nil(t, err)
assert.Equal(t, models.CVEWhitelist{ProjectID: 0, Items: []models.CVEWhitelistItem{}}, *l)
l2, err := GetCVEWhitelist(5)
assert.Nil(t, err)
assert.Equal(t, models.CVEWhitelist{ProjectID: 5, Items: []models.CVEWhitelistItem{}}, *l2)
assert.Nil(t, l2)
longList := []models.CVEWhitelistItem{}
for i := 0; i < 50; i++ {
@ -46,15 +43,6 @@ func TestUpdateAndGetCVEWhitelist(t *testing.T) {
assert.Equal(t, longList, out1.Items)
assert.Equal(t, e, *out1.ExpiresAt)
in2 := models.CVEWhitelist{ProjectID: 3, Items: []models.CVEWhitelistItem{}}
_, err = UpdateCVEWhitelist(in2)
require.Nil(t, err)
// assert.Equal(t, int64(1), n2)
out2, err := GetCVEWhitelist(3)
require.Nil(t, err)
assert.Equal(t, int64(3), out2.ProjectID)
assert.Equal(t, []models.CVEWhitelistItem{}, out2.Items)
sysCVEs := []models.CVEWhitelistItem{
{CVEID: "CVE-2019-10164"},
{CVEID: "CVE-2017-12345"},
@ -62,11 +50,6 @@ func TestUpdateAndGetCVEWhitelist(t *testing.T) {
in3 := models.CVEWhitelist{Items: sysCVEs}
_, err = UpdateCVEWhitelist(in3)
require.Nil(t, err)
// assert.Equal(t, int64(1), n3)
sysList, err := GetSysCVEWhitelist()
require.Nil(t, err)
assert.Equal(t, int64(0), sysList.ProjectID)
assert.Equal(t, sysCVEs, sysList.Items)
// require.Nil(t, ClearTable("cve_whitelist"))
require.Nil(t, ClearTable("cve_whitelist"))
}

View File

@ -31,12 +31,14 @@ import (
const defaultMigrationPath = "migrations/postgresql/"
type pgsql struct {
host string
port string
usr string
pwd string
database string
sslmode string
host string
port string
usr string
pwd string
database string
sslmode string
maxIdleConns int
maxOpenConns int
}
// Name returns the name of PostgreSQL
@ -51,17 +53,19 @@ func (p *pgsql) String() string {
}
// NewPGSQL returns an instance of postgres
func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string) Database {
func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string, maxIdleConns int, maxOpenConns int) Database {
if len(sslmode) == 0 {
sslmode = "disable"
}
return &pgsql{
host: host,
port: port,
usr: usr,
pwd: pwd,
database: database,
sslmode: sslmode,
host: host,
port: port,
usr: usr,
pwd: pwd,
database: database,
sslmode: sslmode,
maxIdleConns: maxIdleConns,
maxOpenConns: maxOpenConns,
}
}
@ -82,7 +86,7 @@ func (p *pgsql) Register(alias ...string) error {
info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
p.host, p.port, p.usr, p.pwd, p.database, p.sslmode)
return orm.RegisterDataBase(an, "postgres", info)
return orm.RegisterDataBase(an, "postgres", info, p.maxIdleConns, p.maxOpenConns)
}
// UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts.

View File

@ -44,7 +44,7 @@ func DeleteProjectMetadata(projectID int64, name ...string) error {
params = append(params, projectID)
if len(name) > 0 {
sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
params = append(params, name)
}
@ -74,7 +74,7 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
params = append(params, projectID)
if len(name) > 0 {
sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
params = append(params, name)
}
@ -82,7 +82,9 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
return proMetas, err
}
func paramPlaceholder(n int) string {
// ParamPlaceholderForIn returns a string that contains placeholders for sql keyword "in"
// e.g. n=3, returns "?,?,?"
func ParamPlaceholderForIn(n int) string {
placeholders := []string{}
for i := 0; i < n; i++ {
placeholders = append(placeholders, "?")
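The renamed helper is what the other DAO files in this commit use to build SQL `in` clauses; a short fragment illustrating the pattern, mirroring those call sites (the query itself is hypothetical):

```go
// Hypothetical caller: build a "?,?,?" placeholder list for an IN clause.
ids := []interface{}{int64(1), int64(2), int64(3)}
sql := fmt.Sprintf(`select * from project where project_id in ( %s )`,
	ParamPlaceholderForIn(len(ids))) // yields "?,?,?"
var projects []*models.Project
_, err := GetOrmer().Raw(sql, ids).QueryRows(&projects)
```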

View File

@ -167,9 +167,10 @@ func GetGroupProjects(groupIDs []int, query *models.ProjectQueryParam) ([]*model
from project p
left join project_member pm on p.project_id = pm.project_id
left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g'
where ug.id in ( %s ) order by name`,
where ug.id in ( %s )`,
sql, groupIDCondition)
}
sql = sql + ` order by name`
sqlStr, queryParams := CreatePagination(query, sql, params)
log.Debugf("query sql:%v", sql)
var projects []*models.Project
@ -259,7 +260,7 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
}
if len(query.ProjectIDs) > 0 {
sql += fmt.Sprintf(` and p.project_id in ( %s )`,
paramPlaceholder(len(query.ProjectIDs)))
ParamPlaceholderForIn(len(query.ProjectIDs)))
params = append(params, query.ProjectIDs)
}
return sql, params

View File

@ -64,7 +64,7 @@ func RemoveBlobsFromProject(projectID int64, blobs ...*models.Blob) error {
return nil
}
sql := fmt.Sprintf(`DELETE FROM project_blob WHERE blob_id IN (%s)`, paramPlaceholder(len(blobIDs)))
sql := fmt.Sprintf(`DELETE FROM project_blob WHERE blob_id IN (%s)`, ParamPlaceholderForIn(len(blobIDs)))
_, err := GetOrmer().Raw(sql, blobIDs).Exec()
return err
@ -89,7 +89,7 @@ func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blo
}
sql := fmt.Sprintf("SELECT * FROM blob WHERE id NOT IN (SELECT blob_id FROM project_blob WHERE project_id = ?) AND digest IN (%s)",
paramPlaceholder(len(blobDigests)))
ParamPlaceholderForIn(len(blobDigests)))
params := []interface{}{projectID}
for _, digest := range blobDigests {
@ -103,3 +103,34 @@ func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blo
return blobs, nil
}
// CountSizeOfProject ...
func CountSizeOfProject(pid int64) (int64, error) {
var blobs []models.Blob
sql := `
SELECT
DISTINCT bb.digest,
bb.id,
bb.content_type,
bb.size,
bb.creation_time
FROM artifact af
JOIN artifact_blob afnb
ON af.digest = afnb.digest_af
JOIN BLOB bb
ON afnb.digest_blob = bb.digest
WHERE af.project_id = ?
`
_, err := GetOrmer().Raw(sql, pid).QueryRows(&blobs)
if err != nil {
return 0, err
}
var size int64
for _, blob := range blobs {
size += blob.Size
}
return size, err
}

View File

@ -38,3 +38,161 @@ func TestHasBlobInProject(t *testing.T) {
require.Nil(t, err)
assert.True(t, has)
}
func TestCountSizeOfProject(t *testing.T) {
_, err := AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob1",
Size: 101,
})
require.Nil(t, err)
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob2",
Size: 202,
})
require.Nil(t, err)
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob3",
Size: 303,
})
require.Nil(t, err)
pid1, err := AddProject(models.Project{
Name: "CountSizeOfProject_project1",
OwnerID: 1,
})
require.Nil(t, err)
af := &models.Artifact{
PID: pid1,
Repo: "hello-world",
Tag: "v1",
Digest: "CountSizeOfProject_af1",
Kind: "image",
}
// add
_, err = AddArtifact(af)
require.Nil(t, err)
afnb1 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af1",
DigestBlob: "CountSizeOfProject_blob1",
}
afnb2 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af1",
DigestBlob: "CountSizeOfProject_blob2",
}
afnb3 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af1",
DigestBlob: "CountSizeOfProject_blob3",
}
var afnbs []*models.ArtifactAndBlob
afnbs = append(afnbs, afnb1)
afnbs = append(afnbs, afnb2)
afnbs = append(afnbs, afnb3)
// add
err = AddArtifactNBlobs(afnbs)
require.Nil(t, err)
pSize, err := CountSizeOfProject(pid1)
assert.Equal(t, pSize, int64(606))
}
func TestCountSizeOfProjectDupdigest(t *testing.T) {
_, err := AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob11",
Size: 101,
})
require.Nil(t, err)
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob22",
Size: 202,
})
require.Nil(t, err)
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob33",
Size: 303,
})
require.Nil(t, err)
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob44",
Size: 404,
})
require.Nil(t, err)
pid1, err := AddProject(models.Project{
Name: "CountSizeOfProject_project11",
OwnerID: 1,
})
require.Nil(t, err)
// add af1 into project
af1 := &models.Artifact{
PID: pid1,
Repo: "hello-world",
Tag: "v1",
Digest: "CountSizeOfProject_af11",
Kind: "image",
}
_, err = AddArtifact(af1)
require.Nil(t, err)
afnb11 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af11",
DigestBlob: "CountSizeOfProject_blob11",
}
afnb12 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af11",
DigestBlob: "CountSizeOfProject_blob22",
}
afnb13 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af11",
DigestBlob: "CountSizeOfProject_blob33",
}
var afnbs1 []*models.ArtifactAndBlob
afnbs1 = append(afnbs1, afnb11)
afnbs1 = append(afnbs1, afnb12)
afnbs1 = append(afnbs1, afnb13)
err = AddArtifactNBlobs(afnbs1)
require.Nil(t, err)
// add af2 into project
af2 := &models.Artifact{
PID: pid1,
Repo: "hello-world",
Tag: "v2",
Digest: "CountSizeOfProject_af22",
Kind: "image",
}
_, err = AddArtifact(af2)
require.Nil(t, err)
afnb21 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af22",
DigestBlob: "CountSizeOfProject_blob11",
}
afnb22 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af22",
DigestBlob: "CountSizeOfProject_blob22",
}
afnb23 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af22",
DigestBlob: "CountSizeOfProject_blob33",
}
afnb24 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af22",
DigestBlob: "CountSizeOfProject_blob44",
}
var afnbs2 []*models.ArtifactAndBlob
afnbs2 = append(afnbs2, afnb21)
afnbs2 = append(afnbs2, afnb22)
afnbs2 = append(afnbs2, afnb23)
afnbs2 = append(afnbs2, afnb24)
err = AddArtifactNBlobs(afnbs2)
require.Nil(t, err)
pSize, err := CountSizeOfProject(pid1)
assert.Equal(t, pSize, int64(1010))
}

View File

@ -193,7 +193,7 @@ func quotaQueryConditions(query ...*models.QuotaQuery) (string, []interface{}) {
}
if len(q.ReferenceIDs) != 0 {
sql += fmt.Sprintf(`AND a.reference_id IN (%s) `, paramPlaceholder(len(q.ReferenceIDs)))
sql += fmt.Sprintf(`AND a.reference_id IN (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs)))
params = append(params, q.ReferenceIDs)
}

View File

@ -111,7 +111,7 @@ func quotaUsageQueryConditions(query ...*models.QuotaUsageQuery) (string, []inte
params = append(params, q.ReferenceID)
}
if len(q.ReferenceIDs) != 0 {
sql += fmt.Sprintf(`and reference_id in (%s) `, paramPlaceholder(len(q.ReferenceIDs)))
sql += fmt.Sprintf(`and reference_id in (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs)))
params = append(params, q.ReferenceIDs)
}

View File

@ -178,7 +178,7 @@ func repositoryQueryConditions(query ...*models.RepositoryQuery) (string, []inte
if len(q.ProjectIDs) > 0 {
sql += fmt.Sprintf(`and r.project_id in ( %s ) `,
paramPlaceholder(len(q.ProjectIDs)))
ParamPlaceholderForIn(len(q.ProjectIDs)))
params = append(params, q.ProjectIDs)
}

View File

@ -117,12 +117,18 @@ func ListUsers(query *models.UserQuery) ([]models.User, error) {
}
func userQueryConditions(query *models.UserQuery) orm.QuerySeter {
qs := GetOrmer().QueryTable(&models.User{}).
Filter("deleted", 0).
Filter("user_id__gt", 1)
qs := GetOrmer().QueryTable(&models.User{}).Filter("deleted", 0)
if query == nil {
return qs
// Exclude admin account, see https://github.com/goharbor/harbor/issues/2527
return qs.Filter("user_id__gt", 1)
}
if len(query.UserIDs) > 0 {
qs = qs.Filter("user_id__in", query.UserIDs)
} else {
// Exclude admin account when not filter by UserIDs, see https://github.com/goharbor/harbor/issues/2527
qs = qs.Filter("user_id__gt", 1)
}
if len(query.Username) > 0 {
@ -202,7 +208,7 @@ func DeleteUser(userID int) error {
name := fmt.Sprintf("%s#%d", user.Username, user.UserID)
email := fmt.Sprintf("%s#%d", user.Email, user.UserID)
_, err = o.Raw(`update harbor_user
_, err = o.Raw(`update harbor_user
set deleted = true, username = ?, email = ?
where user_id = ?`, name, email, userID).Exec()
return err
@ -234,6 +240,14 @@ func OnBoardUser(u *models.User) error {
}
if created {
u.UserID = int(id)
// the current orm framework doesn't support fetching a pointer or sql.NullString with QueryRow
// https://github.com/astaxie/beego/issues/3767
if len(u.Email) == 0 {
_, err = o.Raw("update harbor_user set email = null where user_id = ? ", id).Exec()
if err != nil {
return err
}
}
} else {
existing, err := GetUser(*u)
if err != nil {

View File

@ -90,3 +90,23 @@ func TestOnBoardUser(t *testing.T) {
assert.True(u.UserID == id)
CleanUser(int64(id))
}
func TestOnBoardUser_EmptyEmail(t *testing.T) {
assert := assert.New(t)
u := &models.User{
Username: "empty_email",
Password: "password1",
Realname: "empty_email",
}
err := OnBoardUser(u)
assert.Nil(err)
id := u.UserID
assert.True(id > 0)
err = OnBoardUser(u)
assert.Nil(err)
assert.True(u.UserID == id)
assert.Equal("", u.Email)
user, err := GetUser(models.User{Username: "empty_email"})
assert.Equal("", user.Email)
CleanUser(int64(id))
}

View File

@ -45,12 +45,14 @@ type SQLite struct {
// PostGreSQL ...
type PostGreSQL struct {
Host string `json:"host"`
Port int `json:"port"`
Username string `json:"username"`
Password string `json:"password,omitempty"`
Database string `json:"database"`
SSLMode string `json:"sslmode"`
Host string `json:"host"`
Port int `json:"port"`
Username string `json:"username"`
Password string `json:"password,omitempty"`
Database string `json:"database"`
SSLMode string `json:"sslmode"`
MaxIdleConns int `json:"max_idle_conns"`
MaxOpenConns int `json:"max_open_conns"`
}
// Email ...

View File

@ -46,6 +46,7 @@ type User struct {
// UserQuery ...
type UserQuery struct {
UserIDs []int
Username string
Email string
Pagination *Pagination

View File

@ -55,11 +55,23 @@ func getProjectsBatchFn(ctx context.Context, keys dataloader.Keys) []*dataloader
return handleError(err)
}
var ownerIDs []int
var projectsMap = make(map[int64]*models.Project, len(projectIDs))
for _, project := range projects {
ownerIDs = append(ownerIDs, project.OwnerID)
projectsMap[project.ProjectID] = project
}
owners, err := dao.ListUsers(&models.UserQuery{UserIDs: ownerIDs})
if err != nil {
return handleError(err)
}
var ownersMap = make(map[int]*models.User, len(owners))
for i, owner := range owners {
ownersMap[owner.UserID] = &owners[i]
}
var results []*dataloader.Result
for _, projectID := range projectIDs {
project, ok := projectsMap[projectID]
@ -67,6 +79,11 @@ func getProjectsBatchFn(ctx context.Context, keys dataloader.Keys) []*dataloader
return handleError(fmt.Errorf("project not found, "+"project_id: %d", projectID))
}
owner, ok := ownersMap[project.OwnerID]
if ok {
project.OwnerName = owner.Username
}
result := dataloader.Result{
Data: project,
Error: nil,

View File

@ -41,7 +41,7 @@ func (suite *DriverSuite) TestLoad() {
obj := dr.RefObject{
"id": int64(1),
"name": "library",
"owner_name": "",
"owner_name": "admin",
}
suite.Equal(obj, ref)

src/common/quota/errors.go (new file, 111 lines)
View File

@ -0,0 +1,111 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package quota
import (
"fmt"
"strings"
"github.com/goharbor/harbor/src/pkg/types"
)
// Errors contains all happened errors
type Errors []error
// GetErrors gets all errors that have occurred and returns a slice of errors (Error type)
func (errs Errors) GetErrors() []error {
return errs
}
// Add adds an error to a given slice of errors
func (errs Errors) Add(newErrors ...error) Errors {
for _, err := range newErrors {
if err == nil {
continue
}
if errors, ok := err.(Errors); ok {
errs = errs.Add(errors...)
} else {
ok = true
for _, e := range errs {
if err == e {
ok = false
}
}
if ok {
errs = append(errs, err)
}
}
}
return errs
}
// Error takes a slice of all errors that have occurred and returns it as a formatted string
func (errs Errors) Error() string {
var errors = []string{}
for _, e := range errs {
errors = append(errors, e.Error())
}
return strings.Join(errors, "; ")
}
// ResourceOverflow ...
type ResourceOverflow struct {
Resource types.ResourceName
HardLimit int64
CurrentUsed int64
NewUsed int64
}
func (e *ResourceOverflow) Error() string {
resource := e.Resource
var (
op string
delta int64
)
if e.NewUsed > e.CurrentUsed {
op = "add"
delta = e.NewUsed - e.CurrentUsed
} else {
op = "subtract"
delta = e.CurrentUsed - e.NewUsed
}
return fmt.Sprintf("%s %s of %s resource overflow the hard limit, current usage is %s and hard limit is %s",
op, resource.FormatValue(delta), resource,
resource.FormatValue(e.CurrentUsed), resource.FormatValue(e.HardLimit))
}
// NewResourceOverflowError ...
func NewResourceOverflowError(resource types.ResourceName, hardLimit, currentUsed, newUsed int64) error {
return &ResourceOverflow{Resource: resource, HardLimit: hardLimit, CurrentUsed: currentUsed, NewUsed: newUsed}
}
// ResourceNotFound ...
type ResourceNotFound struct {
Resource types.ResourceName
}
func (e *ResourceNotFound) Error() string {
return fmt.Sprintf("resource %s not found", e.Resource)
}
// NewResourceNotFoundError ...
func NewResourceNotFoundError(resource types.ResourceName) error {
return &ResourceNotFound{Resource: resource}
}
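A brief sketch of how a caller might unpack these new error types, modeled on the updated quota manager test later in this diff; the package paths follow the imports shown there:

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/quota"
	"github.com/goharbor/harbor/src/pkg/types"
)

func main() {
	var errs quota.Errors
	// Accumulate quota errors; Add skips nils and flattens nested Errors values.
	errs = errs.Add(quota.NewResourceOverflowError(types.ResourceStorage, 100, 90, 150))
	errs = errs.Add(quota.NewResourceNotFoundError(types.ResourceCount))

	for _, err := range errs.GetErrors() {
		// Type-assert to react to overflows specifically, as the manager test does.
		if overflow, ok := err.(*quota.ResourceOverflow); ok {
			fmt.Printf("hard limit %d exceeded for %s\n", overflow.HardLimit, overflow.Resource)
		}
	}
	fmt.Println(errs.Error()) // all messages joined with "; "
}
```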

View File

@ -110,7 +110,8 @@ func (m *Manager) getUsageForUpdate(o orm.Ormer) (*models.QuotaUsage, error) {
}
func (m *Manager) updateUsage(o orm.Ormer, resources types.ResourceList,
calculate func(types.ResourceList, types.ResourceList) types.ResourceList) error {
calculate func(types.ResourceList, types.ResourceList) types.ResourceList,
skipOverflow bool) error {
quota, err := m.getQuotaForUpdate(o)
if err != nil {
@ -131,7 +132,13 @@ func (m *Manager) updateUsage(o orm.Ormer, resources types.ResourceList,
}
newUsed := calculate(used, resources)
if err := isSafe(hardLimits, newUsed); err != nil {
// ensure that new used is never negative
if negativeUsed := types.IsNegative(newUsed); len(negativeUsed) > 0 {
return fmt.Errorf("quota usage is negative for resource(s): %s", prettyPrintResourceNames(negativeUsed))
}
if err := isSafe(hardLimits, used, newUsed, skipOverflow); err != nil {
return err
}
@ -176,27 +183,87 @@ func (m *Manager) DeleteQuota() error {
// UpdateQuota update the quota resource spec
func (m *Manager) UpdateQuota(hardLimits types.ResourceList) error {
o := dao.GetOrmer()
if err := m.driver.Validate(hardLimits); err != nil {
return err
}
sql := `UPDATE quota SET hard = ? WHERE reference = ? AND reference_id = ?`
_, err := dao.GetOrmer().Raw(sql, hardLimits.String(), m.reference, m.referenceID).Exec()
_, err := o.Raw(sql, hardLimits.String(), m.reference, m.referenceID).Exec()
return err
}
// SetResourceUsage sets the usage per resource name
func (m *Manager) SetResourceUsage(resource types.ResourceName, value int64) error {
o := dao.GetOrmer()
sql := fmt.Sprintf("UPDATE quota_usage SET used = jsonb_set(used, '{%s}', to_jsonb(%d::int), true) WHERE reference = ? AND reference_id = ?", resource, value)
_, err := o.Raw(sql, m.reference, m.referenceID).Exec()
return err
}
// EnsureQuota ensures the reference has quota and usage,
// if non-existent, will create new quota and usage.
// if existent, update the quota and usage.
func (m *Manager) EnsureQuota(usages types.ResourceList) error {
query := &models.QuotaQuery{
Reference: m.reference,
ReferenceID: m.referenceID,
}
quotas, err := dao.ListQuotas(query)
if err != nil {
return err
}
// non-existent: create quota and usage
defaultHardLimit := m.driver.HardLimits()
if len(quotas) == 0 {
_, err := m.NewQuota(defaultHardLimit, usages)
if err != nil {
return err
}
return nil
}
// existent
used := usages
quotaUsed, err := types.NewResourceList(quotas[0].Used)
if err != nil {
return err
}
if types.Equals(quotaUsed, used) {
return nil
}
dao.WithTransaction(func(o orm.Ormer) error {
usage, err := m.getUsageForUpdate(o)
if err != nil {
return err
}
usage.Used = used.String()
usage.UpdateTime = time.Now()
_, err = o.Update(usage)
if err != nil {
return err
}
return nil
})
return nil
}
// AddResources add resources to usage
func (m *Manager) AddResources(resources types.ResourceList) error {
return dao.WithTransaction(func(o orm.Ormer) error {
return m.updateUsage(o, resources, types.Add)
return m.updateUsage(o, resources, types.Add, false)
})
}
// SubtractResources subtract resources from usage
func (m *Manager) SubtractResources(resources types.ResourceList) error {
return dao.WithTransaction(func(o orm.Ormer) error {
return m.updateUsage(o, resources, types.Subtract)
return m.updateUsage(o, resources, types.Subtract, true)
})
}
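For illustration, a minimal usage sketch of the Manager API after this change, assuming Harbor's database layer is already initialized; it highlights that AddResources enforces the hard limits while SubtractResources passes skipOverflow=true, so releasing usage never fails on overflow. The project reference ID is a placeholder.

package main

import (
    "log"

    "github.com/goharbor/harbor/src/common/quota"
    "github.com/goharbor/harbor/src/pkg/types"
)

func main() {
    // Bind the manager to a project reference, as the core APIs in this change do.
    mgr, err := quota.NewManager("project", "1") // "1" is a placeholder project ID
    if err != nil {
        log.Fatal(err)
    }

    usage := types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 2048}

    // AddResources runs the overflow check (skipOverflow=false) and may return
    // an Errors value containing ResourceOverflow / ResourceNotFound items.
    if err := mgr.AddResources(usage); err != nil {
        log.Println("quota exceeded or invalid:", err)
        return
    }

    // SubtractResources skips the overflow check (skipOverflow=true), but the
    // usage is still prevented from going negative by updateUsage.
    if err := mgr.SubtractResources(usage); err != nil {
        log.Println("failed to release usage:", err)
    }
}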

View File

@ -21,6 +21,7 @@ import (
"testing"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota/driver"
"github.com/goharbor/harbor/src/common/quota/driver/mocks"
"github.com/goharbor/harbor/src/pkg/types"
@ -131,6 +132,66 @@ func (suite *ManagerSuite) TestUpdateQuota() {
}
}
func (suite *ManagerSuite) TestSetResourceUsage() {
mgr := suite.quotaManager()
id, _ := mgr.NewQuota(hardLimits)
if err := mgr.SetResourceUsage(types.ResourceCount, 123); suite.Nil(err) {
quota, _ := dao.GetQuota(id)
suite.Equal(hardLimits, mustResourceList(quota.Hard))
usage, _ := dao.GetQuotaUsage(id)
suite.Equal(types.ResourceList{types.ResourceCount: 123, types.ResourceStorage: 0}, mustResourceList(usage.Used))
}
if err := mgr.SetResourceUsage(types.ResourceStorage, 234); suite.Nil(err) {
usage, _ := dao.GetQuotaUsage(id)
suite.Equal(types.ResourceList{types.ResourceCount: 123, types.ResourceStorage: 234}, mustResourceList(usage.Used))
}
}
func (suite *ManagerSuite) TestEnsureQuota() {
// non-existent
nonExistRefID := "3"
mgr := suite.quotaManager(nonExistRefID)
infinite := types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1}
usage := types.ResourceList{types.ResourceCount: 10, types.ResourceStorage: 10}
err := mgr.EnsureQuota(usage)
suite.Nil(err)
query := &models.QuotaQuery{
Reference: reference,
ReferenceID: nonExistRefID,
}
quotas, err := dao.ListQuotas(query)
suite.Nil(err)
suite.Equal(usage, mustResourceList(quotas[0].Used))
suite.Equal(infinite, mustResourceList(quotas[0].Hard))
// existent
existRefID := "4"
mgr = suite.quotaManager(existRefID)
used := types.ResourceList{types.ResourceCount: 11, types.ResourceStorage: 11}
if id, err := mgr.NewQuota(hardLimits, used); suite.Nil(err) {
quota, _ := dao.GetQuota(id)
suite.Equal(hardLimits, mustResourceList(quota.Hard))
usage, _ := dao.GetQuotaUsage(id)
suite.Equal(used, mustResourceList(usage.Used))
}
usage2 := types.ResourceList{types.ResourceCount: 12, types.ResourceStorage: 12}
err = mgr.EnsureQuota(usage2)
suite.Nil(err)
query2 := &models.QuotaQuery{
Reference: reference,
ReferenceID: existRefID,
}
quotas2, err := dao.ListQuotas(query2)
suite.Equal(usage2, mustResourceList(quotas2[0].Used))
suite.Equal(hardLimits, mustResourceList(quotas2[0].Hard))
}
func (suite *ManagerSuite) TestQuotaAutoCreation() {
for i := 0; i < 10; i++ {
mgr := suite.quotaManager(fmt.Sprintf("%d", i))
@ -157,7 +218,11 @@ func (suite *ManagerSuite) TestAddResources() {
}
if err := mgr.AddResources(types.ResourceList{types.ResourceStorage: 10000}); suite.Error(err) {
suite.True(IsUnsafeError(err))
if errs, ok := err.(Errors); suite.True(ok) {
for _, err := range errs {
suite.IsType(&ResourceOverflow{}, err)
}
}
}
}

View File

@ -15,48 +15,43 @@
package quota
import (
"fmt"
"sort"
"strings"
"github.com/goharbor/harbor/src/pkg/types"
)
type unsafe struct {
message string
}
func isSafe(hardLimits types.ResourceList, currentUsed types.ResourceList, newUsed types.ResourceList, skipOverflow bool) error {
var errs Errors
func (err *unsafe) Error() string {
return err.message
}
func newUnsafe(message string) error {
return &unsafe{message: message}
}
// IsUnsafeError returns true when the err is unsafe error
func IsUnsafeError(err error) bool {
_, ok := err.(*unsafe)
return ok
}
func isSafe(hardLimits types.ResourceList, used types.ResourceList) error {
for key, value := range used {
if value < 0 {
return newUnsafe(fmt.Sprintf("bad used value: %d", value))
for resource, value := range newUsed {
hardLimit, found := hardLimits[resource]
if !found {
errs = errs.Add(NewResourceNotFoundError(resource))
continue
}
if hard, found := hardLimits[key]; found {
if hard == types.UNLIMITED {
continue
}
if value > hard {
return newUnsafe(fmt.Sprintf("over the quota: used %d but only hard %d", value, hard))
}
} else {
return newUnsafe(fmt.Sprintf("hard limit not found: %s", key))
if hardLimit == types.UNLIMITED || value == currentUsed[resource] {
continue
}
if value > hardLimit && !skipOverflow {
errs = errs.Add(NewResourceOverflowError(resource, hardLimit, currentUsed[resource], value))
}
}
if len(errs) > 0 {
return errs
}
return nil
}
func prettyPrintResourceNames(a []types.ResourceName) string {
values := []string{}
for _, value := range a {
values = append(values, string(value))
}
sort.Strings(values)
return strings.Join(values, ",")
}

View File

@ -15,45 +15,17 @@
package quota
import (
"errors"
"testing"
"github.com/goharbor/harbor/src/pkg/types"
)
func TestIsUnsafeError(t *testing.T) {
func Test_isSafe(t *testing.T) {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
"is unsafe error",
args{err: newUnsafe("unsafe")},
true,
},
{
"is not unsafe error",
args{err: errors.New("unsafe")},
false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := IsUnsafeError(tt.args.err); got != tt.want {
t.Errorf("IsUnsafeError() = %v, want %v", got, tt.want)
}
})
}
}
func Test_checkQuotas(t *testing.T) {
type args struct {
hardLimits types.ResourceList
used types.ResourceList
hardLimits types.ResourceList
currentUsed types.ResourceList
newUsed types.ResourceList
skipOverflow bool
}
tests := []struct {
name string
@ -62,33 +34,58 @@ func Test_checkQuotas(t *testing.T) {
}{
{
"unlimited",
args{hardLimits: types.ResourceList{types.ResourceStorage: types.UNLIMITED}, used: types.ResourceList{types.ResourceStorage: 1000}},
args{
types.ResourceList{types.ResourceStorage: types.UNLIMITED},
types.ResourceList{types.ResourceStorage: 1000},
types.ResourceList{types.ResourceStorage: 1000},
false,
},
false,
},
{
"ok",
args{hardLimits: types.ResourceList{types.ResourceStorage: 100}, used: types.ResourceList{types.ResourceStorage: 1}},
args{
types.ResourceList{types.ResourceStorage: 100},
types.ResourceList{types.ResourceStorage: 10},
types.ResourceList{types.ResourceStorage: 1},
false,
},
false,
},
{
"bad used value",
args{hardLimits: types.ResourceList{types.ResourceStorage: 100}, used: types.ResourceList{types.ResourceStorage: -1}},
"over the hard limit",
args{
types.ResourceList{types.ResourceStorage: 100},
types.ResourceList{types.ResourceStorage: 0},
types.ResourceList{types.ResourceStorage: 200},
false,
},
true,
},
{
"over the hard limit",
args{hardLimits: types.ResourceList{types.ResourceStorage: 100}, used: types.ResourceList{types.ResourceStorage: 200}},
true,
"skip overflow",
args{
types.ResourceList{types.ResourceStorage: 100},
types.ResourceList{types.ResourceStorage: 0},
types.ResourceList{types.ResourceStorage: 200},
true,
},
false,
},
{
"hard limit not found",
args{hardLimits: types.ResourceList{types.ResourceStorage: 100}, used: types.ResourceList{types.ResourceCount: 1}},
args{
types.ResourceList{types.ResourceStorage: 100},
types.ResourceList{types.ResourceCount: 0},
types.ResourceList{types.ResourceCount: 1},
false,
},
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := isSafe(tt.args.hardLimits, tt.args.used); (err != nil) != tt.wantErr {
if err := isSafe(tt.args.hardLimits, tt.args.currentUsed, tt.args.newUsed, tt.args.skipOverflow); (err != nil) != tt.wantErr {
t.Errorf("isSafe() error = %v, wantErr %v", err, tt.wantErr)
}
})

View File

@ -31,8 +31,8 @@ type Namespace interface {
}
type projectNamespace struct {
projectIDOrName interface{}
isPublic bool
projectID int64
isPublic bool
}
func (ns *projectNamespace) Kind() string {
@ -40,11 +40,11 @@ func (ns *projectNamespace) Kind() string {
}
func (ns *projectNamespace) Resource(subresources ...Resource) Resource {
return Resource(fmt.Sprintf("/project/%v", ns.projectIDOrName)).Subresource(subresources...)
return Resource(fmt.Sprintf("/project/%d", ns.projectID)).Subresource(subresources...)
}
func (ns *projectNamespace) Identity() interface{} {
return ns.projectIDOrName
return ns.projectID
}
func (ns *projectNamespace) IsPublic() bool {
@ -52,10 +52,10 @@ func (ns *projectNamespace) IsPublic() bool {
}
// NewProjectNamespace returns namespace for project
func NewProjectNamespace(projectIDOrName interface{}, isPublic ...bool) Namespace {
func NewProjectNamespace(projectID int64, isPublic ...bool) Namespace {
isPublicNamespace := false
if len(isPublic) > 0 {
isPublicNamespace = isPublic[0]
}
return &projectNamespace{projectIDOrName: projectIDOrName, isPublic: isPublicNamespace}
return &projectNamespace{projectID: projectID, isPublic: isPublicNamespace}
}

View File

@ -27,7 +27,7 @@ type ProjectNamespaceTestSuite struct {
func (suite *ProjectNamespaceTestSuite) TestResource() {
var namespace Namespace
namespace = &projectNamespace{projectIDOrName: int64(1)}
namespace = &projectNamespace{projectID: int64(1)}
suite.Equal(namespace.Resource(Resource("image")), Resource("/project/1/image"))
}
@ -35,9 +35,6 @@ func (suite *ProjectNamespaceTestSuite) TestResource() {
func (suite *ProjectNamespaceTestSuite) TestIdentity() {
namespace, _ := Resource("/project/1/image").GetNamespace()
suite.Equal(namespace.Identity(), int64(1))
namespace, _ = Resource("/project/library/image").GetNamespace()
suite.Equal(namespace.Identity(), "library")
}
func TestProjectNamespaceTestSuite(t *testing.T) {

View File

@ -37,14 +37,10 @@ func projectNamespaceParser(resource Resource) (Namespace, error) {
return nil, errors.New("not support resource")
}
var projectIDOrName interface{}
id, err := strconv.ParseInt(matches[1], 10, 64)
if err == nil {
projectIDOrName = id
} else {
projectIDOrName = matches[1]
projectID, err := strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return nil, err
}
return &projectNamespace{projectIDOrName: projectIDOrName}, nil
return &projectNamespace{projectID: projectID}, nil
}

View File

@ -26,7 +26,7 @@ type ProjectParserTestSuite struct {
func (suite *ProjectParserTestSuite) TestParse() {
namespace, err := projectNamespaceParser(Resource("/project/1/image"))
suite.Equal(namespace, &projectNamespace{projectIDOrName: int64(1)})
suite.Equal(namespace, &projectNamespace{projectID: 1})
suite.Nil(err)
namespace, err = projectNamespaceParser(Resource("/fake/1/image"))

View File

@ -50,8 +50,8 @@ type VisitorTestSuite struct {
}
func (suite *VisitorTestSuite) TestGetPolicies() {
namespace := rbac.NewProjectNamespace("library", false)
publicNamespace := rbac.NewProjectNamespace("library", true)
namespace := rbac.NewProjectNamespace(1, false)
publicNamespace := rbac.NewProjectNamespace(1, true)
anonymous := NewUser(anonymousCtx, namespace)
suite.Nil(anonymous.GetPolicies())
@ -73,7 +73,7 @@ func (suite *VisitorTestSuite) TestGetPolicies() {
}
func (suite *VisitorTestSuite) TestGetRoles() {
namespace := rbac.NewProjectNamespace("library", false)
namespace := rbac.NewProjectNamespace(1, false)
anonymous := NewUser(anonymousCtx, namespace)
suite.Nil(anonymous.GetRoles())

View File

@ -75,10 +75,10 @@ func (s *SecurityContext) Can(action rbac.Action, resource rbac.Resource) bool {
if err == nil {
switch ns.Kind() {
case "project":
projectIDOrName := ns.Identity()
isPublicProject, _ := s.pm.IsPublic(projectIDOrName)
projectNamespace := rbac.NewProjectNamespace(projectIDOrName, isPublicProject)
user := project.NewUser(s, projectNamespace, s.GetProjectRoles(projectIDOrName)...)
projectID := ns.Identity().(int64)
isPublicProject, _ := s.pm.IsPublic(projectID)
projectNamespace := rbac.NewProjectNamespace(projectID, isPublicProject)
user := project.NewUser(s, projectNamespace, s.GetProjectRoles(projectID)...)
return rbac.HasPermission(user, resource, action)
}
}

View File

@ -72,10 +72,10 @@ func (s *SecurityContext) Can(action rbac.Action, resource rbac.Resource) bool {
if err == nil {
switch ns.Kind() {
case "project":
projectIDOrName := ns.Identity()
isPublicProject, _ := s.pm.IsPublic(projectIDOrName)
projectNamespace := rbac.NewProjectNamespace(projectIDOrName, isPublicProject)
user := project.NewUser(s, projectNamespace, s.GetProjectRoles(projectIDOrName)...)
projectID := ns.Identity().(int64)
isPublicProject, _ := s.pm.IsPublic(projectID)
projectNamespace := rbac.NewProjectNamespace(projectID, isPublicProject)
user := project.NewUser(s, projectNamespace, s.GetProjectRoles(projectID)...)
return rbac.HasPermission(user, resource, action)
}
}

View File

@ -176,12 +176,12 @@ func TestHasPullPerm(t *testing.T) {
// public project
ctx := NewSecurityContext(nil, pm)
resource := rbac.NewProjectNamespace("library").Resource(rbac.ResourceRepository)
resource := rbac.NewProjectNamespace(1).Resource(rbac.ResourceRepository)
assert.True(t, ctx.Can(rbac.ActionPull, resource))
// private project, unauthenticated
ctx = NewSecurityContext(nil, pm)
resource = rbac.NewProjectNamespace(private.Name).Resource(rbac.ResourceRepository)
resource = rbac.NewProjectNamespace(private.ProjectID).Resource(rbac.ResourceRepository)
assert.False(t, ctx.Can(rbac.ActionPull, resource))
// private project, authenticated, has no perm
@ -203,7 +203,7 @@ func TestHasPullPerm(t *testing.T) {
}
func TestHasPushPerm(t *testing.T) {
resource := rbac.NewProjectNamespace(private.Name).Resource(rbac.ResourceRepository)
resource := rbac.NewProjectNamespace(private.ProjectID).Resource(rbac.ResourceRepository)
// unauthenticated
ctx := NewSecurityContext(nil, pm)
@ -226,7 +226,7 @@ func TestHasPushPerm(t *testing.T) {
}
func TestHasPushPullPerm(t *testing.T) {
resource := rbac.NewProjectNamespace(private.Name).Resource(rbac.ResourceRepository)
resource := rbac.NewProjectNamespace(private.ProjectID).Resource(rbac.ResourceRepository)
// unauthenticated
ctx := NewSecurityContext(nil, pm)
@ -265,7 +265,7 @@ func TestHasPushPullPermWithGroup(t *testing.T) {
developer.GroupIDs = []int{userGroups[0].ID}
resource := rbac.NewProjectNamespace(project.Name).Resource(rbac.ResourceRepository)
resource := rbac.NewProjectNamespace(project.ProjectID).Resource(rbac.ResourceRepository)
ctx := NewSecurityContext(developer, pm)
assert.True(t, ctx.Can(rbac.ActionPush, resource))

View File

@ -76,9 +76,9 @@ func (s *SecurityContext) Can(action rbac.Action, resource rbac.Resource) bool {
if err == nil {
switch ns.Kind() {
case "project":
projectIDOrName := ns.Identity()
isPublicProject, _ := s.pm.IsPublic(projectIDOrName)
projectNamespace := rbac.NewProjectNamespace(projectIDOrName, isPublicProject)
projectID := ns.Identity().(int64)
isPublicProject, _ := s.pm.IsPublic(projectID)
projectNamespace := rbac.NewProjectNamespace(projectID, isPublicProject)
robot := NewRobot(s.GetUsername(), projectNamespace, s.policy)
return rbac.HasPermission(robot, resource, action)
}

View File

@ -15,6 +15,7 @@
package robot
import (
"fmt"
"os"
"strconv"
"testing"
@ -136,7 +137,7 @@ func TestIsSolutionUser(t *testing.T) {
func TestHasPullPerm(t *testing.T) {
policies := []*rbac.Policy{
{
Resource: "/project/testrobot/repository",
Resource: rbac.Resource(fmt.Sprintf("/project/%d/repository", private.ProjectID)),
Action: rbac.ActionPull,
},
}
@ -146,14 +147,14 @@ func TestHasPullPerm(t *testing.T) {
}
ctx := NewSecurityContext(robot, pm, policies)
resource := rbac.NewProjectNamespace(private.Name).Resource(rbac.ResourceRepository)
resource := rbac.NewProjectNamespace(private.ProjectID).Resource(rbac.ResourceRepository)
assert.True(t, ctx.Can(rbac.ActionPull, resource))
}
func TestHasPushPerm(t *testing.T) {
policies := []*rbac.Policy{
{
Resource: "/project/testrobot/repository",
Resource: rbac.Resource(fmt.Sprintf("/project/%d/repository", private.ProjectID)),
Action: rbac.ActionPush,
},
}
@ -163,18 +164,18 @@ func TestHasPushPerm(t *testing.T) {
}
ctx := NewSecurityContext(robot, pm, policies)
resource := rbac.NewProjectNamespace(private.Name).Resource(rbac.ResourceRepository)
resource := rbac.NewProjectNamespace(private.ProjectID).Resource(rbac.ResourceRepository)
assert.True(t, ctx.Can(rbac.ActionPush, resource))
}
func TestHasPushPullPerm(t *testing.T) {
policies := []*rbac.Policy{
{
Resource: "/project/testrobot/repository",
Resource: rbac.Resource(fmt.Sprintf("/project/%d/repository", private.ProjectID)),
Action: rbac.ActionPush,
},
{
Resource: "/project/testrobot/repository",
Resource: rbac.Resource(fmt.Sprintf("/project/%d/repository", private.ProjectID)),
Action: rbac.ActionPull,
},
}
@ -184,7 +185,7 @@ func TestHasPushPullPerm(t *testing.T) {
}
ctx := NewSecurityContext(robot, pm, policies)
resource := rbac.NewProjectNamespace(private.Name).Resource(rbac.ResourceRepository)
resource := rbac.NewProjectNamespace(private.ProjectID).Resource(rbac.ResourceRepository)
assert.True(t, ctx.Can(rbac.ActionPush, resource) && ctx.Can(rbac.ActionPull, resource))
}

View File

@ -1,9 +1,24 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package robot
import (
"testing"
"github.com/goharbor/harbor/src/common/rbac"
"github.com/stretchr/testify/assert"
"testing"
)
func TestGetPolicies(t *testing.T) {
@ -17,7 +32,7 @@ func TestGetPolicies(t *testing.T) {
robot := robot{
username: "test",
namespace: rbac.NewProjectNamespace("library", false),
namespace: rbac.NewProjectNamespace(1, false),
policy: policies,
}

View File

@ -220,6 +220,27 @@ func (session *Session) SearchUser(username string) ([]models.LdapUser, error) {
}
u.GroupDNList = groupDNList
}
log.Debugf("Searching for nested groups")
nestedGroupDNList := []string{}
nestedGroupFilter := createNestedGroupFilter(ldapEntry.DN)
result, err := session.SearchLdap(nestedGroupFilter)
if err != nil {
return nil, err
}
for _, groupEntry := range result.Entries {
if !contains(u.GroupDNList, groupEntry.DN) {
nestedGroupDNList = append(nestedGroupDNList, strings.TrimSpace(groupEntry.DN))
log.Debugf("Found group %v", groupEntry.DN)
} else {
log.Debugf("%v is already in GroupDNList", groupEntry.DN)
}
}
u.GroupDNList = append(u.GroupDNList, nestedGroupDNList...)
log.Debugf("Done searching for nested groups")
u.DN = ldapEntry.DN
ldapUsers = append(ldapUsers, u)
@ -330,13 +351,13 @@ func (session *Session) createUserFilter(username string) string {
filterTag = goldap.EscapeFilter(username)
}
ldapFilter := session.ldapConfig.LdapFilter
ldapFilter := normalizeFilter(session.ldapConfig.LdapFilter)
ldapUID := session.ldapConfig.LdapUID
if ldapFilter == "" {
ldapFilter = "(" + ldapUID + "=" + filterTag + ")"
} else {
ldapFilter = "(&" + ldapFilter + "(" + ldapUID + "=" + filterTag + "))"
ldapFilter = "(&(" + ldapFilter + ")(" + ldapUID + "=" + filterTag + "))"
}
log.Debug("ldap filter :", ldapFilter)
@ -404,6 +425,7 @@ func createGroupSearchFilter(oldFilter, groupName, groupNameAttribute string) st
filter := ""
groupName = goldap.EscapeFilter(groupName)
groupNameAttribute = goldap.EscapeFilter(groupNameAttribute)
oldFilter = normalizeFilter(oldFilter)
if len(oldFilter) == 0 {
if len(groupName) == 0 {
filter = groupNameAttribute + "=*"
@ -419,3 +441,26 @@ func createGroupSearchFilter(oldFilter, groupName, groupNameAttribute string) st
}
return filter
}
func createNestedGroupFilter(userDN string) string {
filter := ""
filter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:=" + userDN + "))"
return filter
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
// normalizeFilter removes the outer '(' and ')' of an LDAP filter
func normalizeFilter(filter string) string {
norFilter := strings.TrimSpace(filter)
norFilter = strings.TrimPrefix(norFilter, "(")
norFilter = strings.TrimSuffix(norFilter, ")")
return norFilter
}
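A short standalone sketch of how the normalized filter composes inside createUserFilter and how the nested group lookup filter is built; the configured ldap_filter value and user DN below are hypothetical examples, and normalizeFilter is reproduced from the helper above.

package main

import (
    "fmt"
    "strings"
)

// normalizeFilter mirrors the helper added in this change: trim spaces and strip one
// pair of outer parentheses so both "(objectclass=user)" and "objectclass=user" work.
func normalizeFilter(filter string) string {
    f := strings.TrimSpace(filter)
    f = strings.TrimPrefix(f, "(")
    return strings.TrimSuffix(f, ")")
}

func main() {
    configured := "(objectclass=user)" // hypothetical ldap_filter setting
    ldapUID := "uid"
    filterTag := "alice"

    // Same composition as createUserFilter after this change.
    userFilter := "(&(" + normalizeFilter(configured) + ")(" + ldapUID + "=" + filterTag + "))"
    fmt.Println(userFilter) // (&(objectclass=user)(uid=alice))

    // Nested group lookup uses the AD matching-rule-in-chain OID, as in createNestedGroupFilter.
    userDN := "cn=alice,dc=example,dc=com" // hypothetical DN
    nested := "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:=" + userDN + "))"
    fmt.Println(nested)
}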

View File

@ -369,3 +369,25 @@ func TestSession_SearchGroupByDN(t *testing.T) {
})
}
}
func TestNormalizeFilter(t *testing.T) {
type args struct {
filter string
}
tests := []struct {
name string
args args
want string
}{
{"normal test", args{"(objectclass=user)"}, "objectclass=user"},
{"with space", args{" (objectclass=user) "}, "objectclass=user"},
{"nothing", args{"objectclass=user"}, "objectclass=user"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := normalizeFilter(tt.args.filter); got != tt.want {
t.Errorf("normalizeFilter() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -25,11 +25,9 @@ import (
"sort"
"strconv"
"strings"
// "time"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/utils"
)
@ -407,6 +405,7 @@ func (r *Repository) monolithicBlobUpload(location, digest string, size int64, d
if err != nil {
return err
}
req.ContentLength = size
resp, err := r.client.Do(req)
if err != nil {

View File

@ -15,22 +15,24 @@
package api
import (
"encoding/json"
"errors"
"github.com/goharbor/harbor/src/pkg/retention"
"github.com/goharbor/harbor/src/pkg/scheduler"
"fmt"
"net/http"
"github.com/ghodss/yaml"
"github.com/goharbor/harbor/src/common/api"
"github.com/goharbor/harbor/src/common/rbac"
"github.com/goharbor/harbor/src/common/security"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/filter"
"github.com/goharbor/harbor/src/core/promgr"
"github.com/goharbor/harbor/src/pkg/project"
"github.com/goharbor/harbor/src/pkg/repository"
"github.com/goharbor/harbor/src/pkg/retention"
"github.com/goharbor/harbor/src/pkg/scheduler"
)
const (
@ -47,6 +49,10 @@ var (
retentionController retention.APIController
)
var (
errNotFound = errors.New("not found")
)
// BaseController ...
type BaseController struct {
api.BaseAPI
@ -77,6 +83,71 @@ func (b *BaseController) Prepare() {
b.ProjectMgr = pm
}
// RequireAuthenticated returns true when the request is authenticated;
// otherwise it sends an Unauthorized response and returns false
func (b *BaseController) RequireAuthenticated() bool {
if !b.SecurityCtx.IsAuthenticated() {
b.SendUnAuthorizedError(errors.New("Unauthorized"))
return false
}
return true
}
// HasProjectPermission returns true when the request has action permission on project subresource
func (b *BaseController) HasProjectPermission(projectIDOrName interface{}, action rbac.Action, subresource ...rbac.Resource) (bool, error) {
projectID, projectName, err := utils.ParseProjectIDOrName(projectIDOrName)
if err != nil {
return false, err
}
if projectName != "" {
project, err := b.ProjectMgr.Get(projectName)
if err != nil {
return false, err
}
if project == nil {
return false, errNotFound
}
projectID = project.ProjectID
}
resource := rbac.NewProjectNamespace(projectID).Resource(subresource...)
if !b.SecurityCtx.Can(action, resource) {
return false, nil
}
return true, nil
}
// RequireProjectAccess returns true when the request has the action access on the project subresource;
// otherwise it sends an Unauthorized or Forbidden response and returns false
func (b *BaseController) RequireProjectAccess(projectIDOrName interface{}, action rbac.Action, subresource ...rbac.Resource) bool {
hasPermission, err := b.HasProjectPermission(projectIDOrName, action, subresource...)
if err != nil {
if err == errNotFound {
b.SendNotFoundError(fmt.Errorf("project %v not found", projectIDOrName))
} else {
b.SendInternalServerError(err)
}
return false
}
if !hasPermission {
if !b.SecurityCtx.IsAuthenticated() {
b.SendUnAuthorizedError(errors.New("UnAuthorized"))
} else {
b.SendForbiddenError(errors.New(b.SecurityCtx.GetUsername()))
}
return false
}
return true
}
// WriteJSONData writes the JSON data to the client.
func (b *BaseController) WriteJSONData(object interface{}) {
b.Data["json"] = object
@ -121,12 +192,16 @@ func Init() error {
retentionController = retention.NewAPIController(retentionMgr, projectMgr, repositoryMgr, retentionScheduler, retentionLauncher)
callbackFun := func(p interface{}) error {
r, ok := p.(retention.TriggerParam)
if ok {
_, err := retentionController.TriggerRetentionExec(r.PolicyID, r.Trigger, false)
return err
str, ok := p.(string)
if !ok {
return fmt.Errorf("the type of param %v isn't string", p)
}
return errors.New("bad retention callback param")
param := &retention.TriggerParam{}
if err := json.Unmarshal([]byte(str), param); err != nil {
return fmt.Errorf("failed to unmarshal the param: %v", err)
}
_, err := retentionController.TriggerRetentionExec(param.PolicyID, param.Trigger, false)
return err
}
err := scheduler.Register(retention.SchedulerCallback, callbackFun)
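A minimal sketch of the producer side that this callback now expects: the scheduler param must be a JSON-encoded retention.TriggerParam string rather than the struct itself. The field names mirror their use in the callback, but the concrete field types and the trigger value here are assumptions for illustration.

package main

import (
    "encoding/json"
    "fmt"
)

// TriggerParam mirrors the shape the callback unmarshals into; the concrete
// field types in retention.TriggerParam may differ.
type TriggerParam struct {
    PolicyID int64
    Trigger  string
}

func main() {
    param := TriggerParam{PolicyID: 1, Trigger: "Schedule"} // hypothetical values
    data, err := json.Marshal(param)
    if err != nil {
        panic(err)
    }
    // This string is what would be handed to the scheduler and later passed
    // back into the callback registered under retention.SchedulerCallback.
    fmt.Println(string(data))
}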

View File

@ -58,14 +58,7 @@ func (cla *ChartLabelAPI) Prepare() {
}
func (cla *ChartLabelAPI) requireAccess(action rbac.Action) bool {
resource := rbac.NewProjectNamespace(cla.project.ProjectID).Resource(rbac.ResourceHelmChartVersionLabel)
if !cla.SecurityCtx.Can(action, resource) {
cla.SendForbiddenError(errors.New(cla.SecurityCtx.GetUsername()))
return false
}
return true
return cla.RequireProjectAccess(cla.project.ProjectID, action, rbac.ResourceHelmChartVersionLabel)
}
// MarkLabel handles the request of marking label to chart.

View File

@ -105,19 +105,8 @@ func (cra *ChartRepositoryAPI) requireAccess(action rbac.Action, subresource ...
if len(subresource) == 0 {
subresource = append(subresource, rbac.ResourceHelmChart)
}
resource := rbac.NewProjectNamespace(cra.namespace).Resource(subresource...)
if !cra.SecurityCtx.Can(action, resource) {
if !cra.SecurityCtx.IsAuthenticated() {
cra.SendUnAuthorizedError(errors.New("Unauthorized"))
} else {
cra.SendForbiddenError(errors.New(cra.SecurityCtx.GetUsername()))
}
return false
}
return true
return cra.RequireProjectAccess(cra.namespace, action, subresource...)
}
// GetHealthStatus handles GET /api/chartrepo/health

View File

@ -35,6 +35,7 @@ import (
testutils "github.com/goharbor/harbor/src/common/utils/test"
api_models "github.com/goharbor/harbor/src/core/api/models"
apimodels "github.com/goharbor/harbor/src/core/api/models"
quota "github.com/goharbor/harbor/src/core/api/quota"
_ "github.com/goharbor/harbor/src/core/auth/db"
_ "github.com/goharbor/harbor/src/core/auth/ldap"
"github.com/goharbor/harbor/src/core/config"
@ -202,11 +203,18 @@ func init() {
beego.Router("/api/quotas", quotaAPIType, "get:List")
beego.Router("/api/quotas/:id([0-9]+)", quotaAPIType, "get:Get;put:Put")
beego.Router("/api/internal/switchquota", &InternalAPI{}, "put:SwitchQuota")
beego.Router("/api/internal/syncquota", &InternalAPI{}, "post:SyncQuota")
// syncRegistry
if err := SyncRegistry(config.GlobalProjectMgr); err != nil {
log.Fatalf("failed to sync repositories from registry: %v", err)
}
if err := quota.Sync(config.GlobalProjectMgr, false); err != nil {
log.Fatalf("failed to sync quota from backend: %v", err)
}
// Init user Info
admin = &usrInfo{adminName, adminPwd}
unknownUsr = &usrInfo{"unknown", "unknown"}

View File

@ -34,8 +34,9 @@ import (
)
var (
timeout = 60 * time.Second
healthCheckerRegistry = map[string]health.Checker{}
timeout = 60 * time.Second
// HealthCheckerRegistry ...
HealthCheckerRegistry = map[string]health.Checker{}
)
type overallHealthStatus struct {
@ -67,11 +68,11 @@ type HealthAPI struct {
func (h *HealthAPI) CheckHealth() {
var isHealthy healthy = true
components := []*componentHealthStatus{}
c := make(chan *componentHealthStatus, len(healthCheckerRegistry))
for name, checker := range healthCheckerRegistry {
c := make(chan *componentHealthStatus, len(HealthCheckerRegistry))
for name, checker := range HealthCheckerRegistry {
go check(name, checker, timeout, c)
}
for i := 0; i < len(healthCheckerRegistry); i++ {
for i := 0; i < len(HealthCheckerRegistry); i++ {
componentStatus := <-c
if len(componentStatus.Error) != 0 {
isHealthy = false
@ -290,21 +291,21 @@ func redisHealthChecker() health.Checker {
}
func registerHealthCheckers() {
healthCheckerRegistry["core"] = coreHealthChecker()
healthCheckerRegistry["portal"] = portalHealthChecker()
healthCheckerRegistry["jobservice"] = jobserviceHealthChecker()
healthCheckerRegistry["registry"] = registryHealthChecker()
healthCheckerRegistry["registryctl"] = registryCtlHealthChecker()
healthCheckerRegistry["database"] = databaseHealthChecker()
healthCheckerRegistry["redis"] = redisHealthChecker()
HealthCheckerRegistry["core"] = coreHealthChecker()
HealthCheckerRegistry["portal"] = portalHealthChecker()
HealthCheckerRegistry["jobservice"] = jobserviceHealthChecker()
HealthCheckerRegistry["registry"] = registryHealthChecker()
HealthCheckerRegistry["registryctl"] = registryCtlHealthChecker()
HealthCheckerRegistry["database"] = databaseHealthChecker()
HealthCheckerRegistry["redis"] = redisHealthChecker()
if config.WithChartMuseum() {
healthCheckerRegistry["chartmuseum"] = chartmuseumHealthChecker()
HealthCheckerRegistry["chartmuseum"] = chartmuseumHealthChecker()
}
if config.WithClair() {
healthCheckerRegistry["clair"] = clairHealthChecker()
HealthCheckerRegistry["clair"] = clairHealthChecker()
}
if config.WithNotary() {
healthCheckerRegistry["notary"] = notaryHealthChecker()
HealthCheckerRegistry["notary"] = notaryHealthChecker()
}
}

View File

@ -92,9 +92,9 @@ func fakeHealthChecker(healthy bool) health.Checker {
}
func TestCheckHealth(t *testing.T) {
// component01: healthy, component02: healthy => status: healthy
healthCheckerRegistry = map[string]health.Checker{}
healthCheckerRegistry["component01"] = fakeHealthChecker(true)
healthCheckerRegistry["component02"] = fakeHealthChecker(true)
HealthCheckerRegistry = map[string]health.Checker{}
HealthCheckerRegistry["component01"] = fakeHealthChecker(true)
HealthCheckerRegistry["component02"] = fakeHealthChecker(true)
status := map[string]interface{}{}
err := handleAndParse(&testingRequest{
method: http.MethodGet,
@ -104,9 +104,9 @@ func TestCheckHealth(t *testing.T) {
assert.Equal(t, "healthy", status["status"].(string))
// component01: healthy, component02: unhealthy => status: unhealthy
healthCheckerRegistry = map[string]health.Checker{}
healthCheckerRegistry["component01"] = fakeHealthChecker(true)
healthCheckerRegistry["component02"] = fakeHealthChecker(false)
HealthCheckerRegistry = map[string]health.Checker{}
HealthCheckerRegistry["component01"] = fakeHealthChecker(true)
HealthCheckerRegistry["component02"] = fakeHealthChecker(false)
status = map[string]interface{}{}
err = handleAndParse(&testingRequest{
method: http.MethodGet,
@ -128,7 +128,7 @@ func TestDatabaseHealthChecker(t *testing.T) {
}
func TestRegisterHealthCheckers(t *testing.T) {
healthCheckerRegistry = map[string]health.Checker{}
HealthCheckerRegistry = map[string]health.Checker{}
registerHealthCheckers()
assert.NotNil(t, healthCheckerRegistry["core"])
assert.NotNil(t, HealthCheckerRegistry["core"])
}

View File

@ -15,12 +15,21 @@
package api
import (
"errors"
"fmt"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
common_quota "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/pkg/errors"
"strconv"
quota "github.com/goharbor/harbor/src/core/api/quota"
comcfg "github.com/goharbor/harbor/src/common/config"
)
// InternalAPI handles request of harbor admin...
@ -69,3 +78,103 @@ func (ia *InternalAPI) RenameAdmin() {
log.Debugf("The super user has been renamed to: %s", newName)
ia.DestroySession()
}
// QuotaSwitcher ...
type QuotaSwitcher struct {
Enabled bool
}
// SwitchQuota ...
func (ia *InternalAPI) SwitchQuota() {
var req QuotaSwitcher
if err := ia.DecodeJSONReq(&req); err != nil {
ia.SendBadRequestError(err)
return
}
// when quota per project switches from disabled to enabled, the quota usage needs to be updated based on the DB records.
if !config.QuotaPerProjectEnable() && req.Enabled {
if err := ia.ensureQuota(); err != nil {
ia.SendInternalServerError(err)
return
}
}
defer func() {
config.GetCfgManager().Set(common.QuotaPerProjectEnable, req.Enabled)
config.GetCfgManager().Save()
}()
return
}
func (ia *InternalAPI) ensureQuota() error {
projects, err := dao.GetProjects(nil)
if err != nil {
return err
}
for _, project := range projects {
pSize, err := dao.CountSizeOfProject(project.ProjectID)
if err != nil {
logger.Warningf("error happen on counting size of project:%d , error:%v, just skip it.", project.ProjectID, err)
continue
}
afQuery := &models.ArtifactQuery{
PID: project.ProjectID,
}
afs, err := dao.ListArtifacts(afQuery)
if err != nil {
logger.Warningf("error happen on counting number of project:%d , error:%v, just skip it.", project.ProjectID, err)
continue
}
pCount := int64(len(afs))
// it needs to append the chart count
if config.WithChartMuseum() {
count, err := chartController.GetCountOfCharts([]string{project.Name})
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("get chart count of project %d failed", project.ProjectID))
logger.Error(err)
continue
}
pCount = pCount + int64(count)
}
quotaMgr, err := common_quota.NewManager("project", strconv.FormatInt(project.ProjectID, 10))
if err != nil {
logger.Errorf("Error occurred when to new quota manager %v, just skip it.", err)
continue
}
used := common_quota.ResourceList{
common_quota.ResourceStorage: pSize,
common_quota.ResourceCount: pCount,
}
if err := quotaMgr.EnsureQuota(used); err != nil {
logger.Errorf("cannot ensure quota for the project: %d, err: %v, just skip it.", project.ProjectID, err)
continue
}
}
return nil
}
// SyncQuota ...
func (ia *InternalAPI) SyncQuota() {
cur := config.ReadOnly()
cfgMgr := comcfg.NewDBCfgManager()
if cur != true {
cfgMgr.Set(common.ReadOnly, true)
}
// For the API call, the sync should be asynchronous to avoid a timeout
go func() {
defer func() {
if cur != true {
cfgMgr.Set(common.ReadOnly, false)
}
}()
log.Info("start to sync quota(API), the system will be set to ReadOnly and back it normal once it done.")
err := quota.Sync(ia.ProjectMgr, false)
if err != nil {
log.Errorf("fail to sync quota(API), but with error: %v, please try to do it again.", err)
return
}
log.Info("success to sync quota(API).")
}()
return
}
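A hedged client-side sketch of calling the two new internal endpoints; the host and credentials are placeholders, and because QuotaSwitcher has no JSON tags the exported field name is used in the payload. The tests in the following file exercise the same routes.

package main

import (
    "bytes"
    "fmt"
    "net/http"
)

func main() {
    // Placeholder Harbor host and admin credentials.
    const harbor = "https://harbor.example.com"

    // Enable quota per project: PUT /api/internal/switchquota with a QuotaSwitcher body.
    body := bytes.NewBufferString(`{"Enabled": true}`)
    req, err := http.NewRequest(http.MethodPut, harbor+"/api/internal/switchquota", body)
    if err != nil {
        panic(err)
    }
    req.SetBasicAuth("admin", "placeholder-password")
    req.Header.Set("Content-Type", "application/json")
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    resp.Body.Close()
    fmt.Println("switchquota:", resp.Status) // 200 for a sys admin, 401/403 otherwise

    // Trigger a background quota sync: POST /api/internal/syncquota with no body.
    req, err = http.NewRequest(http.MethodPost, harbor+"/api/internal/syncquota", nil)
    if err != nil {
        panic(err)
    }
    req.SetBasicAuth("admin", "placeholder-password")
    resp, err = http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    resp.Body.Close()
    fmt.Println("syncquota:", resp.Status) // returns 200 immediately; the sync runs asynchronously
}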

View File

@ -0,0 +1,89 @@
// Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"net/http"
"testing"
)
// cannot verify the real scenario here
func TestSwitchQuota(t *testing.T) {
cases := []*codeCheckingCase{
// 401
{
request: &testingRequest{
method: http.MethodPut,
url: "/api/internal/switchquota",
},
code: http.StatusUnauthorized,
},
// 200
{
request: &testingRequest{
method: http.MethodPut,
url: "/api/internal/switchquota",
credential: sysAdmin,
bodyJSON: &QuotaSwitcher{
Enabled: true,
},
},
code: http.StatusOK,
},
// 403
{
request: &testingRequest{
url: "/api/internal/switchquota",
method: http.MethodPut,
credential: nonSysAdmin,
},
code: http.StatusForbidden,
},
}
runCodeCheckingCases(t, cases...)
}
// cannot verify the real scenario here
func TestSyncQuota(t *testing.T) {
cases := []*codeCheckingCase{
// 401
{
request: &testingRequest{
method: http.MethodPost,
url: "/api/internal/syncquota",
},
code: http.StatusUnauthorized,
},
// 200
{
request: &testingRequest{
method: http.MethodPost,
url: "/api/internal/syncquota",
credential: sysAdmin,
},
code: http.StatusOK,
},
// 403
{
request: &testingRequest{
url: "/api/internal/syncquota",
method: http.MethodPost,
credential: nonSysAdmin,
},
code: http.StatusForbidden,
},
}
runCodeCheckingCases(t, cases...)
}

View File

@ -78,8 +78,7 @@ func (l *LabelAPI) requireAccess(label *models.Label, action rbac.Action, subres
if len(subresources) == 0 {
subresources = append(subresources, rbac.ResourceLabel)
}
resource := rbac.NewProjectNamespace(label.ProjectID).Resource(subresources...)
hasPermission = l.SecurityCtx.Can(action, resource)
hasPermission, _ = l.HasProjectPermission(label.ProjectID, action, subresources...)
}
if !hasPermission {
@ -203,13 +202,7 @@ func (l *LabelAPI) List() {
return
}
resource := rbac.NewProjectNamespace(projectID).Resource(rbac.ResourceLabel)
if !l.SecurityCtx.Can(rbac.ActionList, resource) {
if !l.SecurityCtx.IsAuthenticated() {
l.SendUnAuthorizedError(errors.New("UnAuthorized"))
return
}
l.SendForbiddenError(errors.New(l.SecurityCtx.GetUsername()))
if !l.RequireProjectAccess(projectID, rbac.ActionList, rbac.ResourceLabel) {
return
}
query.ProjectID = projectID

View File

@ -22,6 +22,7 @@ import (
"strings"
"errors"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/rbac"
"github.com/goharbor/harbor/src/common/utils/log"
@ -90,18 +91,7 @@ func (m *MetadataAPI) Prepare() {
}
func (m *MetadataAPI) requireAccess(action rbac.Action) bool {
resource := rbac.NewProjectNamespace(m.project.ProjectID).Resource(rbac.ResourceMetadata)
if !m.SecurityCtx.Can(action, resource) {
if !m.SecurityCtx.IsAuthenticated() {
m.SendUnAuthorizedError(errors.New("Unauthorized"))
} else {
m.SendForbiddenError(errors.New(m.SecurityCtx.GetUsername()))
}
return false
}
return true
return m.RequireProjectAccess(m.project.ProjectID, action, rbac.ResourceMetadata)
}
// Get ...

View File

@ -93,16 +93,5 @@ func (w *NotificationJobAPI) validateRBAC(action rbac.Action, projectID int64) b
return true
}
project, err := w.ProjectMgr.Get(projectID)
if err != nil {
w.ParseAndHandleError(fmt.Sprintf("failed to get project %d", projectID), err)
return false
}
resource := rbac.NewProjectNamespace(project.ProjectID).Resource(rbac.ResourceNotificationPolicy)
if !w.SecurityCtx.Can(action, resource) {
w.SendForbiddenError(errors.New(w.SecurityCtx.GetUsername()))
return false
}
return true
return w.RequireProjectAccess(projectID, action, rbac.ResourceNotificationPolicy)
}

View File

@ -283,18 +283,7 @@ func (w *NotificationPolicyAPI) validateRBAC(action rbac.Action, projectID int64
return true
}
project, err := w.ProjectMgr.Get(projectID)
if err != nil {
w.ParseAndHandleError(fmt.Sprintf("failed to get project %d", projectID), err)
return false
}
resource := rbac.NewProjectNamespace(project.ProjectID).Resource(rbac.ResourceNotificationPolicy)
if !w.SecurityCtx.Can(action, resource) {
w.SendForbiddenError(errors.New(w.SecurityCtx.GetUsername()))
return false
}
return true
return w.RequireProjectAccess(projectID, action, rbac.ResourceNotificationPolicy)
}
func (w *NotificationPolicyAPI) validateTargets(policy *models.NotificationPolicy) bool {

View File

@ -86,20 +86,8 @@ func (p *ProjectAPI) requireAccess(action rbac.Action, subresource ...rbac.Resou
if len(subresource) == 0 {
subresource = append(subresource, rbac.ResourceSelf)
}
resource := rbac.NewProjectNamespace(p.project.ProjectID).Resource(subresource...)
if !p.SecurityCtx.Can(action, resource) {
if !p.SecurityCtx.IsAuthenticated() {
p.SendUnAuthorizedError(errors.New("Unauthorized"))
} else {
p.SendForbiddenError(errors.New(p.SecurityCtx.GetUsername()))
}
return false
}
return true
return p.RequireProjectAccess(p.project.ProjectID, action, subresource...)
}
// Post ...
@ -139,23 +127,26 @@ func (p *ProjectAPI) Post() {
return
}
setting, err := config.QuotaSetting()
if err != nil {
log.Errorf("failed to get quota setting: %v", err)
p.SendInternalServerError(fmt.Errorf("failed to get quota setting: %v", err))
return
}
var hardLimits types.ResourceList
if config.QuotaPerProjectEnable() {
setting, err := config.QuotaSetting()
if err != nil {
log.Errorf("failed to get quota setting: %v", err)
p.SendInternalServerError(fmt.Errorf("failed to get quota setting: %v", err))
return
}
if !p.SecurityCtx.IsSysAdmin() {
pro.CountLimit = &setting.CountPerProject
pro.StorageLimit = &setting.StoragePerProject
}
if !p.SecurityCtx.IsSysAdmin() {
pro.CountLimit = &setting.CountPerProject
pro.StorageLimit = &setting.StoragePerProject
}
hardLimits, err := projectQuotaHardLimits(pro, setting)
if err != nil {
log.Errorf("Invalid project request, error: %v", err)
p.SendBadRequestError(fmt.Errorf("invalid request: %v", err))
return
hardLimits, err = projectQuotaHardLimits(pro, setting)
if err != nil {
log.Errorf("Invalid project request, error: %v", err)
p.SendBadRequestError(fmt.Errorf("invalid request: %v", err))
return
}
}
exist, err := p.ProjectMgr.Exists(pro.Name)
@ -212,14 +203,16 @@ func (p *ProjectAPI) Post() {
return
}
quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
if err != nil {
p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err))
return
}
if _, err := quotaMgr.NewQuota(hardLimits); err != nil {
p.SendInternalServerError(fmt.Errorf("failed to create quota for project: %v", err))
return
if config.QuotaPerProjectEnable() {
quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
if err != nil {
p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err))
return
}
if _, err := quotaMgr.NewQuota(hardLimits); err != nil {
p.SendInternalServerError(fmt.Errorf("failed to create quota for project: %v", err))
return
}
}
go func() {
@ -653,6 +646,11 @@ func projectQuotaHardLimits(req *models.ProjectRequest, setting *models.QuotaSet
}
func getProjectQuotaSummary(projectID int64, summary *models.ProjectSummary) {
if !config.QuotaPerProjectEnable() {
log.Debug("Quota per project disabled")
return
}
quotas, err := dao.ListQuotas(&models.QuotaQuery{Reference: "project", ReferenceID: strconv.FormatInt(projectID, 10)})
if err != nil {
log.Debugf("failed to get quota for project: %d", projectID)

View File

@ -172,7 +172,7 @@ func TestListProjects(t *testing.T) {
}()
// ----------------------------case 1 : Response Code=200----------------------------//
fmt.Println("case 1: respose code:200")
fmt.Println("case 1: response code:200")
httpStatusCode, result, err := apiTest.ProjectsGet(
&apilib.ProjectQuery{
Name: addProject.ProjectName,
@ -263,7 +263,7 @@ func TestProGetByID(t *testing.T) {
}()
// ----------------------------case 1 : Response Code=200----------------------------//
fmt.Println("case 1: respose code:200")
fmt.Println("case 1: response code:200")
httpStatusCode, result, err := apiTest.ProjectsGetByPID(projectID)
if err != nil {
t.Error("Error while search project by proID", err.Error())
@ -295,7 +295,7 @@ func TestDeleteProject(t *testing.T) {
}
// --------------------------case 2: Response Code=200---------------------------------//
fmt.Println("case2: respose code:200")
fmt.Println("case2: response code:200")
httpStatusCode, err = apiTest.ProjectsDelete(*admin, projectID)
if err != nil {
t.Error("Error while delete project", err.Error())
@ -335,7 +335,7 @@ func TestProHead(t *testing.T) {
apiTest := newHarborAPI()
// ----------------------------case 1 : Response Code=200----------------------------//
fmt.Println("case 1: respose code:200")
fmt.Println("case 1: response code:200")
httpStatusCode, err := apiTest.ProjectsHead(*admin, "library")
if err != nil {
t.Error("Error while search project by proName", err.Error())
@ -345,7 +345,7 @@ func TestProHead(t *testing.T) {
}
// ----------------------------case 2 : Response Code=404:Project name does not exist.----------------------------//
fmt.Println("case 2: respose code:404,Project name does not exist.")
fmt.Println("case 2: response code:404,Project name does not exist.")
httpStatusCode, err = apiTest.ProjectsHead(*admin, "libra")
if err != nil {
t.Error("Error while search project by proName", err.Error())
@ -369,22 +369,22 @@ func TestPut(t *testing.T) {
},
}
fmt.Println("case 1: respose code:200")
fmt.Println("case 1: response code:200")
code, err := apiTest.ProjectsPut(*admin, "1", project)
require.Nil(t, err)
assert.Equal(int(200), code)
fmt.Println("case 2: respose code:401, User need to log in first.")
fmt.Println("case 2: response code:401, User need to log in first.")
code, err = apiTest.ProjectsPut(*unknownUsr, "1", project)
require.Nil(t, err)
assert.Equal(int(401), code)
fmt.Println("case 3: respose code:400, Invalid project id")
fmt.Println("case 3: response code:400, Invalid project id")
code, err = apiTest.ProjectsPut(*admin, "cc", project)
require.Nil(t, err)
assert.Equal(int(400), code)
fmt.Println("case 4: respose code:404, Not found the project")
fmt.Println("case 4: response code:404, Not found the project")
code, err = apiTest.ProjectsPut(*admin, "1234", project)
require.Nil(t, err)
assert.Equal(int(404), code)
@ -407,7 +407,7 @@ func TestProjectLogsFilter(t *testing.T) {
}
// -------------------case1: Response Code=200------------------------------//
fmt.Println("case 1: respose code:200")
fmt.Println("case 1: response code:200")
projectID := "1"
httpStatusCode, _, err := apiTest.ProjectLogs(*admin, projectID, query)
if err != nil {
@ -417,7 +417,7 @@ func TestProjectLogsFilter(t *testing.T) {
assert.Equal(int(200), httpStatusCode, "httpStatusCode should be 200")
}
// -------------------case2: Response Code=401:User need to log in first.------------------------------//
fmt.Println("case 2: respose code:401:User need to log in first.")
fmt.Println("case 2: response code:401:User need to log in first.")
projectID = "1"
httpStatusCode, _, err = apiTest.ProjectLogs(*unknownUsr, projectID, query)
if err != nil {
@ -427,7 +427,7 @@ func TestProjectLogsFilter(t *testing.T) {
assert.Equal(int(401), httpStatusCode, "httpStatusCode should be 401")
}
// -------------------case3: Response Code=404:Project does not exist.-------------------------//
fmt.Println("case 3: respose code:404:Illegal format of provided ID value.")
fmt.Println("case 3: response code:404:Illegal format of provided ID value.")
projectID = "11111"
httpStatusCode, _, err = apiTest.ProjectLogs(*admin, projectID, query)
if err != nil {
@ -498,7 +498,7 @@ func TestProjectSummary(t *testing.T) {
}()
// ----------------------------case 1 : Response Code=200----------------------------//
fmt.Println("case 1: respose code:200")
fmt.Println("case 1: response code:200")
httpStatusCode, summary, err := apiTest.ProjectSummary(*admin, fmt.Sprintf("%d", projectID))
if err != nil {
t.Error("Error while search project by proName", err.Error())

View File

@ -99,19 +99,7 @@ func (pma *ProjectMemberAPI) Prepare() {
}
func (pma *ProjectMemberAPI) requireAccess(action rbac.Action) bool {
resource := rbac.NewProjectNamespace(pma.project.ProjectID).Resource(rbac.ResourceMember)
if !pma.SecurityCtx.Can(action, resource) {
if !pma.SecurityCtx.IsAuthenticated() {
pma.SendUnAuthorizedError(errors.New("Unauthorized"))
} else {
pma.SendForbiddenError(errors.New(pma.SecurityCtx.GetUsername()))
}
return false
}
return true
return pma.RequireProjectAccess(pma.project.ProjectID, action, rbac.ResourceMember)
}
// Get ...

View File

@ -0,0 +1,226 @@
// Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chart
import (
"fmt"
"github.com/goharbor/harbor/src/chartserver"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
common_quota "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/api"
quota "github.com/goharbor/harbor/src/core/api/quota"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/promgr"
"github.com/pkg/errors"
"net/url"
"strings"
"sync"
)
// Migrator ...
type Migrator struct {
pm promgr.ProjectManager
}
// NewChartMigrator returns a new chart Migrator.
func NewChartMigrator(pm promgr.ProjectManager) quota.QuotaMigrator {
migrator := Migrator{
pm: pm,
}
return &migrator
}
var (
controller *chartserver.Controller
controllerErr error
controllerOnce sync.Once
)
// Ping ...
func (rm *Migrator) Ping() error {
return api.HealthCheckerRegistry["chartmuseum"].Check()
}
// Dump ...
// Depends on the DB to dump chart data, as chartmuseum cannot list all namespaces.
func (rm *Migrator) Dump() ([]quota.ProjectInfo, error) {
var (
projects []quota.ProjectInfo
wg sync.WaitGroup
err error
)
all, err := dao.GetProjects(nil)
if err != nil {
return nil, err
}
wg.Add(len(all))
errChan := make(chan error, 1)
infoChan := make(chan interface{})
done := make(chan bool, 1)
go func() {
defer func() {
done <- true
}()
for {
select {
case result := <-infoChan:
if result == nil {
return
}
project, ok := result.(quota.ProjectInfo)
if ok {
projects = append(projects, project)
}
case e := <-errChan:
if err == nil {
err = errors.Wrap(e, "quota sync error on getting info of project")
} else {
err = errors.Wrap(e, err.Error())
}
}
}
}()
for _, project := range all {
go func(project *models.Project) {
defer wg.Done()
var repos []quota.RepoData
ctr, err := chartController()
if err != nil {
errChan <- err
return
}
chartInfo, err := ctr.ListCharts(project.Name)
if err != nil {
errChan <- err
return
}
// repo
for _, chart := range chartInfo {
var afs []*models.Artifact
chartVersions, err := ctr.GetChart(project.Name, chart.Name)
if err != nil {
errChan <- err
continue
}
for _, chart := range chartVersions {
af := &models.Artifact{
PID: project.ProjectID,
Repo: chart.Name,
Tag: chart.Version,
Digest: chart.Digest,
Kind: "Chart",
}
afs = append(afs, af)
}
repoData := quota.RepoData{
Name: project.Name,
Afs: afs,
}
repos = append(repos, repoData)
}
projectInfo := quota.ProjectInfo{
Name: project.Name,
Repos: repos,
}
infoChan <- projectInfo
}(project)
}
wg.Wait()
close(infoChan)
<-done
if err != nil {
return nil, err
}
return projects, nil
}
// Usage ...
// Chart will not cover size.
func (rm *Migrator) Usage(projects []quota.ProjectInfo) ([]quota.ProjectUsage, error) {
var pros []quota.ProjectUsage
for _, project := range projects {
var count int64
// usage count
for _, repo := range project.Repos {
count = count + int64(len(repo.Afs))
}
proUsage := quota.ProjectUsage{
Project: project.Name,
Used: common_quota.ResourceList{
common_quota.ResourceCount: count,
common_quota.ResourceStorage: 0,
},
}
pros = append(pros, proUsage)
}
return pros, nil
}
// Persist ...
// Chart will not persist data into db.
func (rm *Migrator) Persist(projects []quota.ProjectInfo) error {
return nil
}
func chartController() (*chartserver.Controller, error) {
controllerOnce.Do(func() {
addr, err := config.GetChartMuseumEndpoint()
if err != nil {
controllerErr = fmt.Errorf("failed to get the endpoint URL of chart storage server: %s", err.Error())
return
}
addr = strings.TrimSuffix(addr, "/")
url, err := url.Parse(addr)
if err != nil {
controllerErr = errors.New("endpoint URL of chart storage server is malformed")
return
}
ctr, err := chartserver.NewController(url)
if err != nil {
controllerErr = errors.New("failed to initialize chart API controller")
}
controller = ctr
log.Debugf("Chart storage server is set to %s", url.String())
log.Info("API controller for chart repository server is successfully initialized")
})
return controller, controllerErr
}
func init() {
quota.Register("chart", NewChartMigrator)
}

View File

@ -0,0 +1,173 @@
// Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/promgr"
"github.com/goharbor/harbor/src/pkg/types"
"strconv"
)
// QuotaMigrator ...
type QuotaMigrator interface {
// Ping validates and waits for the backend service to be ready.
Ping() error
// Dump exports all data from the backend service (registry, chartmuseum).
Dump() ([]ProjectInfo, error)
// Usage computes the quota usage of all the projects.
Usage([]ProjectInfo) ([]ProjectUsage, error)
// Persist records the data to the DB: the artifact, artifact_blob and blob tables.
Persist([]ProjectInfo) error
}
// ProjectInfo ...
type ProjectInfo struct {
Name string
Repos []RepoData
}
// RepoData ...
type RepoData struct {
Name string
Afs []*models.Artifact
Afnbs []*models.ArtifactAndBlob
Blobs []*models.Blob
}
// ProjectUsage ...
type ProjectUsage struct {
Project string
Used quota.ResourceList
}
// Instance ...
type Instance func(promgr.ProjectManager) QuotaMigrator
var adapters = make(map[string]Instance)
// Register ...
func Register(name string, adapter Instance) {
if adapter == nil {
panic("quota: Register adapter is nil")
}
if _, ok := adapters[name]; ok {
panic("quota: Register called twice for adapter " + name)
}
adapters[name] = adapter
}
// Sync ...
func Sync(pm promgr.ProjectManager, populate bool) error {
totalUsage := make(map[string][]ProjectUsage)
for name, instanceFunc := range adapters {
if !config.WithChartMuseum() {
if name == "chart" {
continue
}
}
adapter := instanceFunc(pm)
if err := adapter.Ping(); err != nil {
return err
}
data, err := adapter.Dump()
if err != nil {
return err
}
usage, err := adapter.Usage(data)
if err != nil {
return err
}
totalUsage[name] = usage
if populate {
if err := adapter.Persist(data); err != nil {
return err
}
}
}
merged := mergeUsage(totalUsage)
if err := ensureQuota(merged); err != nil {
return err
}
return nil
}
// mergeUsage merges the usage of adapters
func mergeUsage(total map[string][]ProjectUsage) []ProjectUsage {
if !config.WithChartMuseum() {
return total["registry"]
}
regUsgs := total["registry"]
chartUsgs := total["chart"]
var mergedUsage []ProjectUsage
temp := make(map[string]quota.ResourceList)
for _, regUsg := range regUsgs {
_, exist := temp[regUsg.Project]
if !exist {
temp[regUsg.Project] = regUsg.Used
mergedUsage = append(mergedUsage, ProjectUsage{
Project: regUsg.Project,
Used: regUsg.Used,
})
}
}
for _, chartUsg := range chartUsgs {
var usedTemp quota.ResourceList
_, exist := temp[chartUsg.Project]
if !exist {
usedTemp = chartUsg.Used
} else {
usedTemp = types.Add(temp[chartUsg.Project], chartUsg.Used)
}
temp[chartUsg.Project] = usedTemp
mergedUsage = append(mergedUsage, ProjectUsage{
Project: chartUsg.Project,
Used: usedTemp,
})
}
return mergedUsage
}
// ensureQuota updates the quota and quota usage in the database.
func ensureQuota(usages []ProjectUsage) error {
var pid int64
for _, usage := range usages {
project, err := dao.GetProjectByName(usage.Project)
if err != nil {
log.Error(err)
return err
}
pid = project.ProjectID
quotaMgr, err := quota.NewManager("project", strconv.FormatInt(pid, 10))
if err != nil {
log.Errorf("Error occurred when to new quota manager %v", err)
return err
}
if err := quotaMgr.EnsureQuota(usage.Used); err != nil {
log.Errorf("cannot ensure quota for the project: %d, err: %v", pid, err)
return err
}
}
return nil
}
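For reference, a minimal hypothetical adapter wired into the Register plumbing above; the package and adapter name are invented for illustration. Note that Sync's mergeUsage currently merges only the "registry" and "chart" keys, so an additional adapter's usage would be collected but not merged without extending that function.

package noop

import (
    quota "github.com/goharbor/harbor/src/core/api/quota"
    "github.com/goharbor/harbor/src/core/promgr"
)

// migrator is a do-nothing adapter that satisfies QuotaMigrator; it reports no usage.
type migrator struct {
    pm promgr.ProjectManager
}

// NewNoopMigrator matches the Instance signature expected by quota.Register.
func NewNoopMigrator(pm promgr.ProjectManager) quota.QuotaMigrator {
    return &migrator{pm: pm}
}

// Ping reports the backing service as always healthy.
func (m *migrator) Ping() error { return nil }

// Dump returns no project data.
func (m *migrator) Dump() ([]quota.ProjectInfo, error) { return nil, nil }

// Usage reports zero usage for every project.
func (m *migrator) Usage([]quota.ProjectInfo) ([]quota.ProjectUsage, error) { return nil, nil }

// Persist stores nothing.
func (m *migrator) Persist([]quota.ProjectInfo) error { return nil }

func init() {
    // Same registration pattern as the chart and registry migrators.
    quota.Register("noop", NewNoopMigrator)
}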

View File

@ -0,0 +1,436 @@
// Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry
import (
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
common_quota "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/common/utils/registry"
"github.com/goharbor/harbor/src/core/api"
quota "github.com/goharbor/harbor/src/core/api/quota"
"github.com/goharbor/harbor/src/core/promgr"
coreutils "github.com/goharbor/harbor/src/core/utils"
"github.com/pkg/errors"
"strings"
"sync"
"time"
)
// Migrator ...
type Migrator struct {
pm promgr.ProjectManager
}
// NewRegistryMigrator returns a new Migrator.
func NewRegistryMigrator(pm promgr.ProjectManager) quota.QuotaMigrator {
migrator := Migrator{
pm: pm,
}
return &migrator
}
// Ping ...
func (rm *Migrator) Ping() error {
return api.HealthCheckerRegistry["registry"].Check()
}
// Dump ...
func (rm *Migrator) Dump() ([]quota.ProjectInfo, error) {
var (
projects []quota.ProjectInfo
wg sync.WaitGroup
err error
)
reposInRegistry, err := api.Catalog()
if err != nil {
return nil, err
}
// repoMap: project name -> list of repositories under that project
repoMap := make(map[string][]string)
for _, item := range reposInRegistry {
projectName := strings.Split(item, "/")[0]
pro, err := rm.pm.Get(projectName)
if err != nil {
log.Errorf("failed to get project %s: %v", projectName, err)
continue
}
if pro == nil {
continue
}
_, exist := repoMap[pro.Name]
if !exist {
repoMap[pro.Name] = []string{item}
} else {
repos := repoMap[pro.Name]
repos = append(repos, item)
repoMap[pro.Name] = repos
}
}
wg.Add(len(repoMap))
errChan := make(chan error, 1)
infoChan := make(chan interface{})
done := make(chan bool, 1)
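// Fan-out/fan-in: one goroutine per project (started below) sends its
// ProjectInfo or an error into the channels, while this single collector
// goroutine aggregates results until infoChan is closed, then signals done.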
go func() {
defer func() {
done <- true
}()
for {
select {
case result := <-infoChan:
if result == nil {
return
}
project, ok := result.(quota.ProjectInfo)
if ok {
projects = append(projects, project)
}
case e := <-errChan:
if err == nil {
err = errors.Wrap(e, "quota sync error on getting info of project")
} else {
err = errors.Wrap(e, err.Error())
}
}
}
}()
for project, repos := range repoMap {
go func(project string, repos []string) {
defer wg.Done()
info, err := infoOfProject(project, repos)
if err != nil {
errChan <- err
return
}
infoChan <- info
}(project, repos)
}
wg.Wait()
close(infoChan)
// wait for the collector goroutine to finish gathering project info
<-done
if err != nil {
return nil, err
}
return projects, nil
}
// Usage ...
// registry needs to merge the shared blobs of different repositories.
func (rm *Migrator) Usage(projects []quota.ProjectInfo) ([]quota.ProjectUsage, error) {
var pros []quota.ProjectUsage
for _, project := range projects {
var size, count int64
var blobs = make(map[string]int64)
// usage count
for _, repo := range project.Repos {
count = count + int64(len(repo.Afs))
// Because some blobs are shared between repositories, the duplicate items need to be removed.
for _, blob := range repo.Blobs {
_, exist := blobs[blob.Digest]
if !exist {
blobs[blob.Digest] = blob.Size
}
}
}
// size
for _, item := range blobs {
size = size + item
}
proUsage := quota.ProjectUsage{
Project: project.Name,
Used: common_quota.ResourceList{
common_quota.ResourceCount: count,
common_quota.ResourceStorage: size,
},
}
pros = append(pros, proUsage)
}
return pros, nil
}
// Persist ...
func (rm *Migrator) Persist(projects []quota.ProjectInfo) error {
for _, project := range projects {
for _, repo := range project.Repos {
if err := persistAf(repo.Afs); err != nil {
return err
}
if err := persistAfnbs(repo.Afnbs); err != nil {
return err
}
if err := persistBlob(repo.Blobs); err != nil {
return err
}
}
}
if err := persistPB(projects); err != nil {
return err
}
return nil
}
func persistAf(afs []*models.Artifact) error {
if len(afs) != 0 {
for _, af := range afs {
_, err := dao.AddArtifact(af)
if err != nil {
if err == dao.ErrDupRows {
continue
}
log.Error(err)
return err
}
}
}
return nil
}
func persistAfnbs(afnbs []*models.ArtifactAndBlob) error {
if len(afnbs) != 0 {
for _, afnb := range afnbs {
_, err := dao.AddArtifactNBlob(afnb)
if err != nil {
if err == dao.ErrDupRows {
continue
}
log.Error(err)
return err
}
}
}
return nil
}
func persistBlob(blobs []*models.Blob) error {
if len(blobs) != 0 {
for _, blob := range blobs {
_, err := dao.AddBlob(blob)
if err != nil {
if err == dao.ErrDupRows {
continue
}
log.Error(err)
return err
}
}
}
return nil
}
func persistPB(projects []quota.ProjectInfo) error {
for _, project := range projects {
var blobs = make(map[string]int64)
var blobsOfPro []*models.Blob
for _, repo := range project.Repos {
for _, blob := range repo.Blobs {
_, exist := blobs[blob.Digest]
if exist {
continue
}
blobs[blob.Digest] = blob.Size
blobInDB, err := dao.GetBlob(blob.Digest)
if err != nil {
log.Error(err)
return err
}
if blobInDB != nil {
blobsOfPro = append(blobsOfPro, blobInDB)
}
}
}
pro, err := dao.GetProjectByName(project.Name)
if err != nil {
log.Error(err)
return err
}
_, err = dao.AddBlobsToProject(pro.ProjectID, blobsOfPro...)
if err != nil {
log.Error(err)
return err
}
}
return nil
}
func infoOfProject(project string, repoList []string) (quota.ProjectInfo, error) {
var (
repos []quota.RepoData
wg sync.WaitGroup
err error
)
wg.Add(len(repoList))
errChan := make(chan error, 1)
infoChan := make(chan interface{})
done := make(chan bool, 1)
pro, err := dao.GetProjectByName(project)
if err != nil {
log.Error(err)
return quota.ProjectInfo{}, err
}
go func() {
defer func() {
done <- true
}()
for {
select {
case result := <-infoChan:
if result == nil {
return
}
repoData, ok := result.(quota.RepoData)
if ok {
repos = append(repos, repoData)
}
case e := <-errChan:
if err == nil {
err = errors.Wrap(e, "quota sync error on getting info of repo")
} else {
err = errors.Wrap(e, err.Error())
}
}
}
}()
for _, repo := range repoList {
go func(pid int64, repo string) {
defer func() {
wg.Done()
}()
info, err := infoOfRepo(pid, repo)
if err != nil {
errChan <- err
return
}
infoChan <- info
}(pro.ProjectID, repo)
}
wg.Wait()
close(infoChan)
<-done
if err != nil {
return quota.ProjectInfo{}, err
}
return quota.ProjectInfo{
Name: project,
Repos: repos,
}, nil
}
func infoOfRepo(pid int64, repo string) (quota.RepoData, error) {
repoClient, err := coreutils.NewRepositoryClientForUI("harbor-core", repo)
if err != nil {
return quota.RepoData{}, err
}
tags, err := repoClient.ListTag()
if err != nil {
return quota.RepoData{}, err
}
var afnbs []*models.ArtifactAndBlob
var afs []*models.Artifact
var blobs []*models.Blob
for _, tag := range tags {
_, mediaType, payload, err := repoClient.PullManifest(tag, []string{
schema1.MediaTypeManifest,
schema1.MediaTypeSignedManifest,
schema2.MediaTypeManifest,
})
if err != nil {
log.Error(err)
return quota.RepoData{}, err
}
manifest, desc, err := registry.UnMarshal(mediaType, payload)
if err != nil {
log.Error(err)
return quota.RepoData{}, err
}
// record the manifest itself as an artifact/blob association
afnb := &models.ArtifactAndBlob{
DigestAF: desc.Digest.String(),
DigestBlob: desc.Digest.String(),
}
afnbs = append(afnbs, afnb)
// add manifest as a blob.
blob := &models.Blob{
Digest: desc.Digest.String(),
ContentType: desc.MediaType,
Size: desc.Size,
CreationTime: time.Now(),
}
blobs = append(blobs, blob)
for _, layer := range manifest.References() {
afnb := &models.ArtifactAndBlob{
DigestAF: desc.Digest.String(),
DigestBlob: layer.Digest.String(),
}
afnbs = append(afnbs, afnb)
blob := &models.Blob{
Digest: layer.Digest.String(),
ContentType: layer.MediaType,
Size: layer.Size,
CreationTime: time.Now(),
}
blobs = append(blobs, blob)
}
af := &models.Artifact{
PID: pid,
Repo: strings.Split(repo, "/")[1],
Tag: tag,
Digest: desc.Digest.String(),
Kind: "Docker-Image",
CreationTime: time.Now(),
}
afs = append(afs, af)
}
return quota.RepoData{
Name: repo,
Afs: afs,
Afnbs: afnbs,
Blobs: blobs,
}, nil
}
func init() {
quota.Register("registry", NewRegistryMigrator)
}

View File

@ -111,13 +111,7 @@ func (ra *RepositoryAPI) Get() {
return
}
resource := rbac.NewProjectNamespace(projectID).Resource(rbac.ResourceRepository)
if !ra.SecurityCtx.Can(rbac.ActionList, resource) {
if !ra.SecurityCtx.IsAuthenticated() {
ra.SendUnAuthorizedError(errors.New("Unauthorized"))
return
}
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
if !ra.RequireProjectAccess(projectID, rbac.ActionList, rbac.ResourceRepository) {
return
}
@ -228,14 +222,8 @@ func (ra *RepositoryAPI) Delete() {
return
}
if !ra.SecurityCtx.IsAuthenticated() {
ra.SendUnAuthorizedError(errors.New("UnAuthorized"))
return
}
resource := rbac.NewProjectNamespace(project.ProjectID).Resource(rbac.ResourceRepository)
if !ra.SecurityCtx.Can(rbac.ActionDelete, resource) {
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
if !ra.RequireAuthenticated() ||
!ra.RequireProjectAccess(project.ProjectID, rbac.ActionDelete, rbac.ResourceRepository) {
return
}
@ -403,14 +391,9 @@ func (ra *RepositoryAPI) GetTag() {
ra.SendNotFoundError(fmt.Errorf("resource: %s:%s not found", repository, tag))
return
}
project, _ := utils.ParseRepository(repository)
resource := rbac.NewProjectNamespace(project).Resource(rbac.ResourceRepositoryTag)
if !ra.SecurityCtx.Can(rbac.ActionRead, resource) {
if !ra.SecurityCtx.IsAuthenticated() {
ra.SendUnAuthorizedError(errors.New("UnAuthorized"))
return
}
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
projectName, _ := utils.ParseRepository(repository)
if !ra.RequireProjectAccess(projectName, rbac.ActionRead, rbac.ResourceRepositoryTag) {
return
}
@ -503,16 +486,14 @@ func (ra *RepositoryAPI) Retag() {
}
// Check whether user has read permission to source project
srcResource := rbac.NewProjectNamespace(srcImage.Project).Resource(rbac.ResourceRepository)
if !ra.SecurityCtx.Can(rbac.ActionPull, srcResource) {
if hasPermission, _ := ra.HasProjectPermission(srcImage.Project, rbac.ActionPull, rbac.ResourceRepository); !hasPermission {
log.Errorf("user has no read permission to project '%s'", srcImage.Project)
ra.SendForbiddenError(fmt.Errorf("%s has no read permission to project %s", ra.SecurityCtx.GetUsername(), srcImage.Project))
return
}
// Check whether user has write permission to target project
destResource := rbac.NewProjectNamespace(project).Resource(rbac.ResourceRepository)
if !ra.SecurityCtx.Can(rbac.ActionPush, destResource) {
if hasPermission, _ := ra.HasProjectPermission(project, rbac.ActionPush, rbac.ResourceRepository); !hasPermission {
log.Errorf("user has no write permission to project '%s'", project)
ra.SendForbiddenError(fmt.Errorf("%s has no write permission to project %s", ra.SecurityCtx.GetUsername(), project))
return
@ -550,13 +531,7 @@ func (ra *RepositoryAPI) GetTags() {
return
}
resource := rbac.NewProjectNamespace(projectName).Resource(rbac.ResourceRepositoryTag)
if !ra.SecurityCtx.Can(rbac.ActionList, resource) {
if !ra.SecurityCtx.IsAuthenticated() {
ra.SendUnAuthorizedError(errors.New("UnAuthorized"))
return
}
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
if !ra.RequireProjectAccess(projectName, rbac.ActionList, rbac.ResourceRepositoryTag) {
return
}
@ -585,7 +560,12 @@ func (ra *RepositoryAPI) GetTags() {
}
labeledTags := map[string]struct{}{}
for _, rl := range rls {
labeledTags[strings.Split(rl.ResourceName, ":")[1]] = struct{}{}
strs := strings.SplitN(rl.ResourceName, ":", 2)
// the "rls" may contain images which don't belong to the repository
if strs[0] != repoName {
continue
}
labeledTags[strs[1]] = struct{}{}
}
ts := []string{}
for _, tag := range tags {
@ -596,11 +576,31 @@ func (ra *RepositoryAPI) GetTags() {
tags = ts
}
detail, err := ra.GetBool("detail", true)
if !detail && err == nil {
ra.Data["json"] = simpleTags(tags)
ra.ServeJSON()
return
}
ra.Data["json"] = assembleTagsInParallel(client, repoName, tags,
ra.SecurityCtx.GetUsername())
ra.ServeJSON()
}
func simpleTags(tags []string) []*models.TagResp {
var tagsResp []*models.TagResp
for _, tag := range tags {
tagsResp = append(tagsResp, &models.TagResp{
TagDetail: models.TagDetail{
Name: tag,
},
})
}
return tagsResp
}
// get config, signature and scan overview and assemble them into one
// struct for each tag in tags
func assembleTagsInParallel(client *registry.Repository, repository string,
@ -791,14 +791,7 @@ func (ra *RepositoryAPI) GetManifests() {
return
}
resource := rbac.NewProjectNamespace(projectName).Resource(rbac.ResourceRepositoryTagManifest)
if !ra.SecurityCtx.Can(rbac.ActionRead, resource) {
if !ra.SecurityCtx.IsAuthenticated() {
ra.SendUnAuthorizedError(errors.New("Unauthorized"))
return
}
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
if !ra.RequireProjectAccess(projectName, rbac.ActionRead, rbac.ResourceRepositoryTagManifest) {
return
}
@ -919,10 +912,8 @@ func (ra *RepositoryAPI) Put() {
return
}
project, _ := utils.ParseRepository(name)
resource := rbac.NewProjectNamespace(project).Resource(rbac.ResourceRepository)
if !ra.SecurityCtx.Can(rbac.ActionUpdate, resource) {
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
projectName, _ := utils.ParseRepository(name)
if !ra.RequireProjectAccess(projectName, rbac.ActionUpdate, rbac.ResourceRepository) {
return
}
@ -958,13 +949,7 @@ func (ra *RepositoryAPI) GetSignatures() {
return
}
resource := rbac.NewProjectNamespace(projectName).Resource(rbac.ResourceRepository)
if !ra.SecurityCtx.Can(rbac.ActionRead, resource) {
if !ra.SecurityCtx.IsAuthenticated() {
ra.SendUnAuthorizedError(errors.New("Unauthorized"))
return
}
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
if !ra.RequireProjectAccess(projectName, rbac.ActionRead, rbac.ResourceRepository) {
return
}
@ -1004,9 +989,7 @@ func (ra *RepositoryAPI) ScanImage() {
return
}
resource := rbac.NewProjectNamespace(projectName).Resource(rbac.ResourceRepositoryTagScanJob)
if !ra.SecurityCtx.Can(rbac.ActionCreate, resource) {
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
if !ra.RequireProjectAccess(projectName, rbac.ActionCreate, rbac.ResourceRepositoryTagScanJob) {
return
}
err = coreutils.TriggerImageScan(repoName, tag)
@ -1035,15 +1018,9 @@ func (ra *RepositoryAPI) VulnerabilityDetails() {
ra.SendNotFoundError(fmt.Errorf("resource: %s:%s not found", repository, tag))
return
}
project, _ := utils.ParseRepository(repository)
resource := rbac.NewProjectNamespace(project).Resource(rbac.ResourceRepositoryTagVulnerability)
if !ra.SecurityCtx.Can(rbac.ActionList, resource) {
if !ra.SecurityCtx.IsAuthenticated() {
ra.SendUnAuthorizedError(errors.New("Unauthorized"))
return
}
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
projectName, _ := utils.ParseRepository(repository)
if !ra.RequireProjectAccess(projectName, rbac.ActionList, rbac.ResourceRepositoryTagVulnerability) {
return
}
res, err := scan.VulnListByDigest(digest)
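The hunks above replace the repeated authenticate-then-authorize blocks with RequireProjectAccess calls. Below is a rough sketch, not the actual Harbor helper, of the behavior those calls are expected to provide; the accessController interface, the Username method, and the exact error handling are assumptions made for illustration.

package sketch

import (
    "errors"

    "github.com/goharbor/harbor/src/common/rbac"
)

// accessController captures only the pieces of the base controller that this
// sketch needs; the real controller exposes more.
type accessController interface {
    HasProjectPermission(projectIDOrName interface{}, action rbac.Action, subresource ...rbac.Resource) (bool, error)
    IsAuthenticated() bool
    Username() string
    SendUnAuthorizedError(err error)
    SendForbiddenError(err error)
    SendInternalServerError(err error)
}

// requireProjectAccess shows the behavior assumed for RequireProjectAccess:
// resolve the permission once, then translate a negative answer into 401 for
// anonymous callers or 403 for authenticated ones.
func requireProjectAccess(c accessController, projectIDOrName interface{},
    action rbac.Action, subresource ...rbac.Resource) bool {
    hasPermission, err := c.HasProjectPermission(projectIDOrName, action, subresource...)
    if err != nil {
        c.SendInternalServerError(err)
        return false
    }
    if hasPermission {
        return true
    }
    if !c.IsAuthenticated() {
        c.SendUnAuthorizedError(errors.New("Unauthorized"))
    } else {
        c.SendForbiddenError(errors.New(c.Username()))
    }
    return false
}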

View File

@ -91,19 +91,8 @@ func (r *RepositoryLabelAPI) requireAccess(action rbac.Action, subresource ...rb
if len(subresource) == 0 {
subresource = append(subresource, rbac.ResourceRepositoryLabel)
}
resource := rbac.NewProjectNamespace(r.repository.ProjectID).Resource(rbac.ResourceRepositoryLabel)
if !r.SecurityCtx.Can(action, resource) {
if !r.SecurityCtx.IsAuthenticated() {
r.SendUnAuthorizedError(errors.New("UnAuthorized"))
} else {
r.SendForbiddenError(errors.New(r.SecurityCtx.GetUsername()))
}
return false
}
return true
return r.RequireProjectAccess(r.repository.ProjectID, action, subresource...)
}
func (r *RepositoryLabelAPI) isValidLabelReq() bool {

View File

@ -67,25 +67,19 @@ func (r *RetentionAPI) GetMetadatas() {
]
},
{
"rule_template": "nothing",
"display_text": "none",
"action": "retain",
"params": []
},
{
"rule_template": "always",
"display_text": "always",
"action": "retain",
"params": [
{
"type": "int",
"unit": "COUNT",
"required": true
}
]
},
"rule_template": "nDaysSinceLastPush",
"display_text": "pushed within the last # days",
"action": "retain",
"params": [
{
"type": "int",
"unit": "DAYS",
"required": true
}
]
},
{
"rule_template": "dayspl",
"rule_template": "nDaysSinceLastPull",
"display_text": "pulled within the last # days",
"action": "retain",
"params": [
@ -97,17 +91,11 @@ func (r *RetentionAPI) GetMetadatas() {
]
},
{
"rule_template": "daysps",
"display_text": "pushed within the last # days",
"action": "retain",
"params": [
{
"type": "int",
"unit": "DAYS",
"required": true
}
]
}
"rule_template": "always",
"display_text": "always",
"action": "retain",
"params": []
}
],
"scope_selectors": [
{
@ -120,14 +108,6 @@ func (r *RetentionAPI) GetMetadatas() {
}
],
"tag_selectors": [
{
"display_text": "Labels",
"kind": "label",
"decorations": [
"withLabels",
"withoutLabels"
]
},
{
"display_text": "Tags",
"kind": "doublestar",
@ -244,7 +224,7 @@ func (r *RetentionAPI) checkRuleConflict(p *policy.Metadata) error {
if old, exists := temp[string(bs)]; exists {
return fmt.Errorf("rule %d is conflict with rule %d", n, old)
}
temp[string(bs)] = tid
temp[string(bs)] = n
rule.ID = tid
}
return nil
@ -424,8 +404,7 @@ func (r *RetentionAPI) requireAccess(p *policy.Metadata, action rbac.Action, sub
if len(subresources) == 0 {
subresources = append(subresources, rbac.ResourceTagRetention)
}
resource := rbac.NewProjectNamespace(p.Scope.Reference).Resource(subresources...)
hasPermission = r.SecurityCtx.Can(action, resource)
hasPermission, _ = r.HasProjectPermission(p.Scope.Reference, action, subresources...)
default:
hasPermission = r.SecurityCtx.IsSysAdmin()
}

View File

@ -17,16 +17,16 @@ package api
import (
"errors"
"fmt"
"net/http"
"strconv"
"time"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/rbac"
"github.com/goharbor/harbor/src/common/token"
"net/http"
"strconv"
"github.com/goharbor/harbor/src/core/config"
"time"
)
// RobotAPI ...
@ -91,13 +91,7 @@ func (r *RobotAPI) Prepare() {
}
func (r *RobotAPI) requireAccess(action rbac.Action) bool {
resource := rbac.NewProjectNamespace(r.project.ProjectID).Resource(rbac.ResourceRobot)
if !r.SecurityCtx.Can(action, resource) {
r.SendForbiddenError(errors.New(r.SecurityCtx.GetUsername()))
return false
}
return true
return r.RequireProjectAccess(r.project.ProjectID, action, rbac.ResourceRobot)
}
// Post ...

View File

@ -55,12 +55,10 @@ func (sj *ScanJobAPI) Prepare() {
sj.SendInternalServerError(errors.New("Failed to get Job data"))
return
}
projectName := strings.SplitN(data.Repository, "/", 2)[0]
resource := rbac.NewProjectNamespace(projectName).Resource(rbac.ResourceRepositoryTagScanJob)
if !sj.SecurityCtx.Can(rbac.ActionRead, resource) {
projectName := strings.SplitN(data.Repository, "/", 2)[0]
if !sj.RequireProjectAccess(projectName, rbac.ActionRead, rbac.ResourceRepositoryTagScanJob) {
log.Errorf("User does not have read permission for project: %s", projectName)
sj.SendForbiddenError(errors.New(sj.SecurityCtx.GetUsername()))
return
}
sj.projectName = projectName

View File

@ -612,7 +612,7 @@ func TestUsersCurrentPermissions(t *testing.T) {
assert := assert.New(t)
apiTest := newHarborAPI()
httpStatusCode, permissions, err := apiTest.UsersGetPermissions("current", "/project/library", *projAdmin)
httpStatusCode, permissions, err := apiTest.UsersGetPermissions("current", "/project/1", *projAdmin)
assert.Nil(err)
assert.Equal(int(200), httpStatusCode, "httpStatusCode should be 200")
assert.NotEmpty(permissions, "permissions should not be empty")
@ -622,11 +622,11 @@ func TestUsersCurrentPermissions(t *testing.T) {
assert.Equal(int(200), httpStatusCode, "httpStatusCode should be 200")
assert.Empty(permissions, "permissions should be empty")
httpStatusCode, _, err = apiTest.UsersGetPermissions(projAdminID, "/project/library", *projAdmin)
httpStatusCode, _, err = apiTest.UsersGetPermissions(projAdminID, "/project/1", *projAdmin)
assert.Nil(err)
assert.Equal(int(200), httpStatusCode, "httpStatusCode should be 200")
httpStatusCode, _, err = apiTest.UsersGetPermissions(projDeveloperID, "/project/library", *projAdmin)
httpStatusCode, _, err = apiTest.UsersGetPermissions(projDeveloperID, "/project/1", *projAdmin)
assert.Nil(err)
assert.Equal(int(403), httpStatusCode, "httpStatusCode should be 403")
}

View File

@ -38,7 +38,7 @@ func SyncRegistry(pm promgr.ProjectManager) error {
log.Infof("Start syncing repositories from registry to DB... ")
reposInRegistry, err := catalog()
reposInRegistry, err := Catalog()
if err != nil {
log.Error(err)
return err
@ -105,7 +105,8 @@ func SyncRegistry(pm promgr.ProjectManager) error {
return nil
}
func catalog() ([]string, error) {
// Catalog ...
func Catalog() ([]string, error) {
repositories := []string{}
rc, err := initRegistryClient()

View File

@ -211,8 +211,6 @@ func (a *Auth) fillInModel(u *models.User) error {
u.Comment = userEntryComment
if strings.Contains(u.Username, "@") {
u.Email = u.Username
} else {
u.Email = fmt.Sprintf("%s@placeholder.com", u.Username)
}
return nil
}

View File

@ -154,7 +154,7 @@ func TestAuth_PostAuthenticate(t *testing.T) {
},
expect: models.User{
Username: "jt",
Email: "jt@placeholder.com",
Email: "",
Realname: "jt",
Password: pwd,
Comment: userEntryComment,

View File

@ -124,8 +124,6 @@ func (l *Auth) OnBoardUser(u *models.User) error {
if u.Email == "" {
if strings.Contains(u.Username, "@") {
u.Email = u.Username
} else {
u.Email = u.Username + "@placeholder.com"
}
}
u.Password = "12345678AbC" // Password is not kept in local db

View File

@ -224,7 +224,7 @@ func TestOnBoardUser_02(t *testing.T) {
t.Errorf("Failed to onboard user")
}
assert.Equal(t, "sample02@placeholder.com", user.Email)
assert.Equal(t, "", user.Email)
dao.CleanUser(int64(user.UserID))
}

View File

@ -77,9 +77,8 @@ func fillEmailRealName(user *models.User) {
if len(user.Realname) == 0 {
user.Realname = user.Username
}
if len(user.Email) == 0 {
// TODO: handle the case when user.Username itself is an email address.
user.Email = user.Username + "@uaa.placeholder"
if len(user.Email) == 0 && strings.Contains(user.Username, "@") {
user.Email = user.Username
}
}

View File

@ -110,7 +110,7 @@ func TestOnBoardUser(t *testing.T) {
user, _ := dao.GetUser(models.User{Username: "test"})
assert.Equal("test", user.Realname)
assert.Equal("test", user.Username)
assert.Equal("test@uaa.placeholder", user.Email)
assert.Equal("", user.Email)
err3 := dao.ClearTable(models.UserTable)
assert.Nil(err3)
}
@ -128,7 +128,7 @@ func TestPostAuthenticate(t *testing.T) {
}
assert.Nil(err)
user, _ := dao.GetUser(models.User{Username: "test"})
assert.Equal("test@uaa.placeholder", user.Email)
assert.Equal("", user.Email)
um2.Email = "newEmail@new.com"
um2.Realname = "newName"
err2 := auth.PostAuthenticate(um2)
@ -145,7 +145,7 @@ func TestPostAuthenticate(t *testing.T) {
assert.Nil(err3)
user3, _ := dao.GetUser(models.User{Username: "test"})
assert.Equal(user3.UserID, um3.UserID)
assert.Equal("test@uaa.placeholder", user3.Email)
assert.Equal("", user3.Email)
assert.Equal("test", user3.Realname)
err4 := dao.ClearTable(models.UserTable)
assert.Nil(err4)

View File

@ -331,12 +331,14 @@ func Database() (*models.Database, error) {
database := &models.Database{}
database.Type = cfgMgr.Get(common.DatabaseType).GetString()
postgresql := &models.PostGreSQL{
Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(),
Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(),
Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(),
Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(),
Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(),
Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(),
Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(),
Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(),
Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(),
Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(),
MaxIdleConns: cfgMgr.Get(common.PostGreSQLMaxIdleConns).GetInt(),
MaxOpenConns: cfgMgr.Get(common.PostGreSQLMaxOpenConns).GetInt(),
}
database.PostGreSQL = postgresql
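The two new fields, MaxIdleConns and MaxOpenConns, are connection-pool limits. Below is a minimal sketch of where such settings typically land, using the standard database/sql API directly rather than Harbor's actual ORM wiring; the driver import, DSN format, and values are placeholders, not taken from this change.

package main

import (
    "database/sql"
    "fmt"
    "log"

    _ "github.com/lib/pq" // assumed Postgres driver for this sketch
)

func openPool(host string, port int, user, password, dbname, sslmode string, maxIdle, maxOpen int) (*sql.DB, error) {
    dsn := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
        host, port, user, password, dbname, sslmode)
    db, err := sql.Open("postgres", dsn)
    if err != nil {
        return nil, err
    }
    // These two calls are what MaxIdleConns / MaxOpenConns ultimately control.
    db.SetMaxIdleConns(maxIdle)
    db.SetMaxOpenConns(maxOpen)
    return db, nil
}

func main() {
    db, err := openPool("127.0.0.1", 5432, "postgres", "root123", "registry", "disable", 2, 10)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
}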
@ -520,6 +522,11 @@ func NotificationEnable() bool {
return cfgMgr.Get(common.NotificationEnable).GetBool()
}
// QuotaPerProjectEnable returns a bool to indicates if quota per project enabled in harbor
func QuotaPerProjectEnable() bool {
return cfgMgr.Get(common.QuotaPerProjectEnable).GetBool()
}
// QuotaSetting returns the setting of quota.
func QuotaSetting() (*models.QuotaSetting, error) {
if err := cfgMgr.Load(); err != nil {

View File

@ -17,6 +17,9 @@ package controllers
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
@ -26,8 +29,6 @@ import (
"github.com/goharbor/harbor/src/core/api"
"github.com/goharbor/harbor/src/core/config"
"github.com/pkg/errors"
"net/http"
"strings"
)
const tokenKey = "oidc_token"
@ -189,9 +190,6 @@ func (oc *OIDCController) Onboard() {
}
email := d.Email
if email == "" {
email = utils.GenerateRandomString() + "@placeholder.com"
}
user := models.User{
Username: username,
Realname: d.Username,

View File

@ -17,16 +17,12 @@ package main
import (
"encoding/gob"
"fmt"
"os"
"os/signal"
"strconv"
"syscall"
"github.com/astaxie/beego"
_ "github.com/astaxie/beego/session/redis"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/common/models"
common_quota "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/api"
@ -34,6 +30,15 @@ import (
_ "github.com/goharbor/harbor/src/core/auth/db"
_ "github.com/goharbor/harbor/src/core/auth/ldap"
_ "github.com/goharbor/harbor/src/core/auth/uaa"
"os"
"os/signal"
"strconv"
"syscall"
quota "github.com/goharbor/harbor/src/core/api/quota"
_ "github.com/goharbor/harbor/src/core/api/quota/chart"
_ "github.com/goharbor/harbor/src/core/api/quota/registry"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/filter"
"github.com/goharbor/harbor/src/core/middlewares"
@ -41,6 +46,7 @@ import (
"github.com/goharbor/harbor/src/core/service/token"
"github.com/goharbor/harbor/src/pkg/notification"
"github.com/goharbor/harbor/src/pkg/scheduler"
"github.com/goharbor/harbor/src/pkg/types"
"github.com/goharbor/harbor/src/replication"
)
@ -67,13 +73,71 @@ func updateInitPassword(userID int, password string) error {
return fmt.Errorf("Failed to update user encrypted password, userID: %d, err: %v", userID, err)
}
log.Infof("User id: %d updated its encypted password successfully.", userID)
log.Infof("User id: %d updated its encrypted password successfully.", userID)
} else {
log.Infof("User id: %d already has its encrypted password.", userID)
}
return nil
}
// Quota migration
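// quotaSync back-fills quota usage data for deployments upgraded from a
// version without quotas; it is invoked once at startup, before beego.Run()
// starts serving requests (see main below).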
func quotaSync() error {
usages, err := dao.ListQuotaUsages()
if err != nil {
log.Errorf("list quota usage error, %v", err)
return err
}
projects, err := dao.GetProjects(nil)
if err != nil {
log.Errorf("list project error, %v", err)
return err
}
// The condition handles these two cases:
// 1. len(projects) > 1 && len(usages) == 1: existing projects have no usage records yet, as the 'library' usage always exists in the DB.
// 2. the migration failed at the phase of inserting usage into the DB, and only part of the records were inserted successfully.
if len(projects) != len(usages) {
log.Info("Start to sync quota data .....")
if err := quota.Sync(config.GlobalProjectMgr, true); err != nil {
log.Errorf("Fail to sync quota data, %v", err)
return err
}
log.Info("Success to sync quota data .....")
return nil
}
// Only the default project exists and it has no usage recorded yet
zero := common_quota.ResourceList{
common_quota.ResourceCount: 0,
common_quota.ResourceStorage: 0,
}
if len(projects) == 1 && len(usages) == 1 {
totalRepo, err := dao.GetTotalOfRepositories()
if err != nil {
log.Error(err)
return err
}
if totalRepo == 0 {
return nil
}
refID, err := strconv.ParseInt(usages[0].ReferenceID, 10, 64)
if err != nil {
log.Error(err)
return err
}
usedRes, err := types.NewResourceList(usages[0].Used)
if err != nil {
log.Error(err)
return err
}
if types.Equals(usedRes, zero) && refID == projects[0].ProjectID {
log.Info("Start to sync quota data .....")
if err := quota.Sync(config.GlobalProjectMgr, true); err != nil {
log.Errorf("Fail to sync quota data, %v", err)
return err
}
log.Info("Success to sync quota data .....")
}
}
return nil
}
func gracefulShutdown(closing chan struct{}) {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
@ -117,7 +181,7 @@ func main() {
password, err := config.InitialAdminPassword()
if err != nil {
log.Fatalf("failed to get admin's initia password: %v", err)
log.Fatalf("failed to get admin's initial password: %v", err)
}
if err := updateInitPassword(adminUserID, password); err != nil {
log.Error(err)
@ -174,6 +238,9 @@ func main() {
log.Fatalf("init proxy error, %v", err)
}
// go proxy.StartProxy()
if err := quotaSync(); err != nil {
log.Fatalf("quota migration error, %v", err)
}
beego.Run()
}

Some files were not shown because too many files have changed in this diff.