Merge pull request #8362 from ninjadq/non-root-contaienr

Non root container
This commit is contained in:
Qian Deng 2019-08-08 17:34:25 +08:00 committed by GitHub
commit a935823e3d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
35 changed files with 268 additions and 232 deletions

View File

@ -242,7 +242,7 @@ PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
$(HARBORPKG)/install.sh \
$(HARBORPKG)/harbor.yml
DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
DOCKERCOMPOSE_FILE_OPT=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
ifeq ($(NOTARYFLAG), true)
DOCKERSAVE_PARA+= goharbor/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) goharbor/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG)
@ -412,17 +412,16 @@ pushimage:
start:
@echo "loading harbor images..."
@$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_LIST) up -d
@$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_FILE_OPT) up -d
@echo "Start complete. You can visit harbor now."
down:
@echo "Please make sure to set -e NOTARYFLAG=true/CLAIRFLAG=true/CHARTFLAG=true if you are using Notary/CLAIR/Chartmuseum in Harbor, otherwise the Notary/CLAIR/Chartmuseum containers cannot be stopped automatically."
@while [ -z "$$CONTINUE" ]; do \
read -r -p "Type anything but Y or y to exit. [Y/N]: " CONTINUE; \
done ; \
[ $$CONTINUE = "y" ] || [ $$CONTINUE = "Y" ] || (echo "Exiting."; exit 1;)
@echo "stoping harbor instance..."
@$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_LIST) down -v
@$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_FILE_OPT) down -v
@echo "Done."
swagger_client:

View File

@ -6,11 +6,11 @@ RUN tdnf install sudo -y >> /dev/null\
&& mkdir /harbor/
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/ping || exit 1
COPY ./make/photon/core/harbor_core ./make/photon/core/start.sh ./UIVERSION /harbor/
COPY ./make/photon/core/harbor_core ./UIVERSION /harbor/
COPY ./src/core/views /harbor/views
COPY ./make/migrations /harbor/migrations
RUN chmod u+x /harbor/start.sh /harbor/harbor_core
RUN chmod u+x /harbor/harbor_core
WORKDIR /harbor/
ENTRYPOINT ["/harbor/start.sh"]
USER harbor
ENTRYPOINT ["/harbor/harbor_core"]

View File

@ -1,3 +0,0 @@
#!/bin/sh
sudo -E -u \#10000 "/harbor/harbor_core"

View File

@ -18,15 +18,16 @@ RUN tdnf erase -y toybox && tdnf install -y util-linux net-tools
VOLUME /var/lib/postgresql/data
ADD ./make/photon/db/docker-entrypoint.sh /entrypoint.sh
ADD ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh
RUN chmod u+x /entrypoint.sh /docker-healthcheck.sh
ENTRYPOINT ["/entrypoint.sh"]
HEALTHCHECK CMD ["/docker-healthcheck.sh"]
COPY ./make/photon/db/docker-entrypoint.sh /docker-entrypoint.sh
COPY ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh
COPY ./make/photon/db/initial-notaryserver.sql /docker-entrypoint-initdb.d/
COPY ./make/photon/db/initial-notarysigner.sql /docker-entrypoint-initdb.d/
COPY ./make/photon/db/initial-registry.sql /docker-entrypoint-initdb.d/
RUN chown -R postgres:postgres /docker-entrypoint.sh /docker-healthcheck.sh /docker-entrypoint-initdb.d \
&& chmod u+x /docker-entrypoint.sh /docker-healthcheck.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
HEALTHCHECK CMD ["/docker-healthcheck.sh"]
EXPOSE 5432
CMD ["postgres"]
USER postgres

View File

@ -23,95 +23,88 @@ file_env() {
unset "$fileVar"
}
if [ "${1:0:1}" = '-' ]; then
set -- postgres "$@"
fi
if [ "$1" = 'postgres' ]; then
chown -R postgres:postgres $PGDATA
# look specifically for PG_VERSION, as it is expected in the DB dir
if [ ! -s "$PGDATA/PG_VERSION" ]; then
file_env 'POSTGRES_INITDB_ARGS'
if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
fi
su - $1 -c "initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS"
# check password first so we can output the warning before postgres
# messes it up
file_env 'POSTGRES_PASSWORD'
if [ "$POSTGRES_PASSWORD" ]; then
pass="PASSWORD '$POSTGRES_PASSWORD'"
authMethod=md5
else
# The - option suppresses leading tabs but *not* spaces. :)
cat >&2 <<-EOF
****************************************************
WARNING: No password has been set for the database.
This will allow anyone with access to the
Postgres port to access your database. In
Docker's default configuration, this is
effectively any other container on the same
system.
Use "-e POSTGRES_PASSWORD=password" to set
it in "docker run".
****************************************************
# look specifically for PG_VERSION, as it is expected in the DB dir
if [ ! -s "$PGDATA/PG_VERSION" ]; then
file_env 'POSTGRES_INITDB_ARGS'
if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
fi
initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS
# check password first so we can output the warning before postgres
# messes it up
file_env 'POSTGRES_PASSWORD'
if [ "$POSTGRES_PASSWORD" ]; then
pass="PASSWORD '$POSTGRES_PASSWORD'"
authMethod=md5
else
# The - option suppresses leading tabs but *not* spaces. :)
cat >&2 <<-EOF
****************************************************
WARNING: No password has been set for the database.
This will allow anyone with access to the
Postgres port to access your database. In
Docker's default configuration, this is
effectively any other container on the same
system.
Use "-e POSTGRES_PASSWORD=password" to set
it in "docker run".
****************************************************
EOF
pass=
authMethod=trust
fi
pass=
authMethod=trust
fi
{
echo
echo "host all all all $authMethod"
} >> "$PGDATA/pg_hba.conf"
su postgres
echo `whoami`
# internal start of server in order to allow set-up using psql-client
# does not listen on external TCP/IP and waits until start finishes
su - $1 -c "pg_ctl -D \"$PGDATA\" -o \"-c listen_addresses='localhost'\" -w start"
{
echo
echo "host all all all $authMethod"
} >> "$PGDATA/pg_hba.conf"
echo `whoami`
# internal start of server in order to allow set-up using psql-client
# does not listen on external TCP/IP and waits until start finishes
pg_ctl -D "$PGDATA" -o "-c listen_addresses=''" -w start
file_env 'POSTGRES_USER' 'postgres'
file_env 'POSTGRES_DB' "$POSTGRES_USER"
file_env 'POSTGRES_USER' 'postgres'
file_env 'POSTGRES_DB' "$POSTGRES_USER"
psql=( psql -v ON_ERROR_STOP=1 )
psql=( psql -v ON_ERROR_STOP=1 )
if [ "$POSTGRES_DB" != 'postgres' ]; then
"${psql[@]}" --username postgres <<-EOSQL
CREATE DATABASE "$POSTGRES_DB" ;
EOSQL
echo
fi
if [ "$POSTGRES_USER" = 'postgres' ]; then
op='ALTER'
else
op='CREATE'
fi
if [ "$POSTGRES_DB" != 'postgres' ]; then
"${psql[@]}" --username postgres <<-EOSQL
$op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
CREATE DATABASE "$POSTGRES_DB" ;
EOSQL
echo
psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
PGUSER="${PGUSER:-postgres}" \
su - $1 -c "pg_ctl -D \"$PGDATA\" -m fast -w stop"
echo
echo 'PostgreSQL init process complete; ready for start up.'
echo
fi
if [ "$POSTGRES_USER" = 'postgres' ]; then
op='ALTER'
else
op='CREATE'
fi
"${psql[@]}" --username postgres <<-EOSQL
$op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
EOSQL
echo
psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
PGUSER="${PGUSER:-postgres}" \
pg_ctl -D "$PGDATA" -m fast -w stop
echo
echo 'PostgreSQL init process complete; ready for start up.'
echo
fi
exec su - $1 -c "$@ -D $PGDATA"
postgres -D $PGDATA

View File

@ -1,13 +1,19 @@
FROM photon:2.0
RUN mkdir /harbor/ \
&& tdnf install sudo -y >> /dev/null\
RUN tdnf install sudo -y >> /dev/null\
&& tdnf clean all \
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor
COPY ./make/photon/jobservice/start.sh ./make/photon/jobservice/harbor_jobservice /harbor/
COPY ./make/photon/jobservice/harbor_jobservice /harbor/
RUN chmod u+x /harbor/harbor_jobservice
RUN chmod u+x /harbor/harbor_jobservice /harbor/start.sh
RUN mkdir -p /var/log/jobs
WORKDIR /harbor/
ENTRYPOINT ["/harbor/start.sh"]
USER harbor
VOLUME ["/var/log/jobs/"]
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/v1/stats || exit 1
ENTRYPOINT ["/harbor/harbor_jobservice", "-c", "/etc/jobservice/config.yml"]

View File

@ -1,6 +0,0 @@
#!/bin/sh
if [ -d /var/log/jobs ]; then
chown -R 10000:10000 /var/log/jobs/
fi
sudo -E -u \#10000 "/harbor/harbor_jobservice" "-c" "/etc/jobservice/config.yml"

View File

@ -1,14 +1,19 @@
FROM photon:2.0
RUN tdnf install -y nginx >> /dev/null\
RUN tdnf install sudo nginx -y >> /dev/null\
&& tdnf clean all \
&& groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \
&& tdnf clean all
&& ln -sf /dev/stderr /var/log/nginx/error.log
EXPOSE 80
VOLUME /var/cache/nginx /var/log/nginx /run
EXPOSE 8080
STOPSIGNAL SIGQUIT
HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1
USER nginx
CMD ["nginx", "-g", "daemon off;"]

View File

@ -1,2 +0,0 @@
#!/bin/sh
sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt"

View File

@ -4,12 +4,12 @@ RUN tdnf install -y shadow sudo \
&& tdnf clean all \
&& groupadd -r -g 10000 notary \
&& useradd --no-log-init -r -g 10000 -u 10000 notary
COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
COPY ./make/photon/notary/binary/notary-server /bin/notary-server
COPY ./make/photon/notary/binary/migrate /bin/migrate
COPY ./make/photon/notary/binary/migrations/ /migrations/
COPY ./make/photon/notary/server-start.sh /bin/server-start.sh
RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/server-start.sh
RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch
ENV SERVICE_NAME=notary_server
ENTRYPOINT [ "/bin/server-start.sh" ]
USER notary
CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt

View File

@ -1,2 +0,0 @@
#!/bin/sh
sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt"

View File

@ -8,8 +8,8 @@ COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
COPY ./make/photon/notary/binary/notary-signer /bin/notary-signer
COPY ./make/photon/notary/binary/migrate /bin/migrate
COPY ./make/photon/notary/binary/migrations/ /migrations/
COPY ./make/photon/notary/signer-start.sh /bin/signer-start.sh
RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/signer-start.sh
RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch
ENV SERVICE_NAME=notary_signer
ENTRYPOINT [ "/bin/signer-start.sh" ]
USER notary
CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt

View File

@ -1,39 +1,42 @@
FROM node:10.15.0 as nodeportal
RUN mkdir -p /portal_src
RUN mkdir -p /build_dir
COPY make/photon/portal/entrypoint.sh /
COPY src/portal /portal_src
COPY ./docs/swagger.yaml /portal_src
COPY ./docs/swagger.yaml /portal_src
WORKDIR /portal_src
WORKDIR /build_dir
RUN npm install && \
chmod u+x /entrypoint.sh
RUN /entrypoint.sh
VOLUME ["/portal_src"]
RUN cp -r /portal_src/* /build_dir \
&& ls -la \
&& apt-get update \
&& apt-get install -y --no-install-recommends python-yaml=3.12-1 \
&& python -c 'import sys, yaml, json; y=yaml.load(sys.stdin.read()); print json.dumps(y)' < swagger.yaml > swagger.json \
&& npm install \
&& npm run build_lib \
&& npm run link_lib \
&& npm run release
FROM photon:2.0
RUN tdnf install -y nginx >> /dev/null \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \
&& tdnf clean all
EXPOSE 80
VOLUME /var/cache/nginx /var/log/nginx /run
COPY --from=nodeportal /build_dir/dist /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.yaml /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.json /usr/share/nginx/html
COPY make/photon/portal/nginx.conf /etc/nginx/nginx.conf
RUN tdnf install -y nginx sudo >> /dev/null \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \
&& groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \
&& chown -R nginx:nginx /etc/nginx \
&& tdnf clean all
EXPOSE 8080
VOLUME /var/cache/nginx /var/log/nginx /run
STOPSIGNAL SIGQUIT
HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1
USER nginx
CMD ["nginx", "-g", "daemon off;"]

View File

@ -1,21 +0,0 @@
#!/bin/bash
set -e
cd /build_dir
cp -r /portal_src/* .
ls -la
# Update
apt-get update
apt-get install -y ruby
ruby -ryaml -rjson -e 'puts JSON.pretty_generate(YAML.load(ARGF))' swagger.yaml>swagger.json
cat ./package.json
npm install
## Build harbor-portal and link it
npm run build_lib
npm run link_lib
## Build production
npm run release

View File

@ -1,13 +1,21 @@
worker_processes 1;
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
server {
listen 80;
listen 8080;
server_name localhost;
root /usr/share/nginx/html;

View File

@ -5,10 +5,17 @@ from pathlib import Path
DEFAULT_UID = 10000
DEFAULT_GID = 10000
PG_UID = 999
PG_GID = 999
REDIS_UID = 999
REDIS_GID = 999
## Global variable
base_dir = '/harbor_make'
templates_dir = "/usr/src/app/templates"
config_dir = '/config'
data_dir = '/data'
secret_dir = '/secret'
secret_key_dir='/secret/keys'

View File

@ -16,6 +16,7 @@ from utils.clair import prepare_clair
from utils.chart import prepare_chartmuseum
from utils.docker_compose import prepare_docker_compose
from utils.nginx import prepare_nginx, nginx_confd_dir
from utils.redis import prepare_redis
from g import (config_dir, input_config_path, private_key_pem_path, root_crt_path, secret_key_dir,
old_private_key_pem_path, old_crt_path)
@ -38,6 +39,7 @@ def main(conf, with_notary, with_clair, with_chartmuseum):
prepare_registry_ctl(config_dict)
prepare_db(config_dict)
prepare_job_service(config_dict)
prepare_redis(config_dict)
get_secret_key(secret_key_dir)

View File

@ -290,9 +290,9 @@ services:
{% endif %}
dns_search: .
ports:
- {{http_port}}:80
- {{http_port}}:8080
{% if protocol == 'https' %}
- {{https_port}}:443
- {{https_port}}:8443
{% endif %}
{% if with_notary %}
- 4443:4443

View File

@ -1,4 +1,5 @@
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
@ -7,6 +8,11 @@ events {
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
# this is necessary for us to be able to disable request buffering in all cases
@ -17,7 +23,7 @@ http {
}
upstream portal {
server portal:80;
server portal:8080;
}
log_format timed_combined '$remote_addr - '
@ -28,7 +34,7 @@ http {
access_log /dev/stdout timed_combined;
server {
listen 80;
listen 8080;
server_tokens off;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
@ -117,7 +123,7 @@ http {
proxy_request_buffering off;
}
location /service/notifications {
location /service/notifications {
return 404;
}
}

View File

@ -1,4 +1,5 @@
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
@ -7,6 +8,11 @@ events {
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
include /etc/nginx/conf.d/*.upstream.conf;
@ -18,7 +24,7 @@ http {
}
upstream portal {
server portal:80;
server portal:8080;
}
log_format timed_combined '$remote_addr - '
@ -31,7 +37,7 @@ http {
include /etc/nginx/conf.d/*.server.conf;
server {
listen 443 ssl;
listen 8443 ssl;
# server_name harbordomain.com;
server_tokens off;
# SSL
@ -141,10 +147,9 @@ http {
return 404;
}
}
server {
listen 80;
#server_name harbordomain.com;
return 308 https://$host$request_uri;
listen 8080;
#server_name harbordomain.com;
return 308 https://$host$request_uri;
}
}

View File

@ -2,12 +2,12 @@ import os, shutil
from g import templates_dir, config_dir, DEFAULT_UID, DEFAULT_GID
from .jinja import render_jinja
from .misc import prepare_config_dir
from .misc import prepare_dir
clair_template_dir = os.path.join(templates_dir, "clair")
def prepare_clair(config_dict):
clair_config_dir = prepare_config_dir(config_dir, "clair")
clair_config_dir = prepare_dir(config_dir, "clair")
if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
print("Copying offline data file for clair DB")

View File

@ -1,7 +1,7 @@
import shutil, os
from g import config_dir, templates_dir
from utils.misc import prepare_config_dir, generate_random_string
from utils.misc import prepare_dir, generate_random_string
from utils.jinja import render_jinja
core_config_dir = os.path.join(config_dir, "core", "certificates")
@ -33,7 +33,7 @@ def prepare_core(config_dict, with_notary, with_clair, with_chartmuseum):
copy_core_config(core_conf_template_path, core_conf)
def prepare_core_config_dir():
prepare_config_dir(core_config_dir)
prepare_dir(core_config_dir)
def copy_core_config(core_templates_path, core_config_path):
shutil.copyfile(core_templates_path, core_config_path)

View File

@ -1,20 +1,18 @@
import os
from g import config_dir, templates_dir
from utils.misc import prepare_config_dir
from g import config_dir, templates_dir, data_dir, PG_UID, PG_GID
from utils.misc import prepare_dir
from utils.jinja import render_jinja
db_config_dir = os.path.join(config_dir, "db")
db_env_template_path = os.path.join(templates_dir, "db", "env.jinja")
db_conf_env = os.path.join(config_dir, "db", "env")
database_data_path = os.path.join(data_dir, 'database')
def prepare_db(config_dict):
prepare_db_config_dir()
prepare_dir(database_data_path, uid=PG_UID, gid=PG_GID)
prepare_dir(db_config_dir)
render_jinja(
db_env_template_path,
db_conf_env,
harbor_db_password=config_dict['harbor_db_password'])
def prepare_db_config_dir():
prepare_config_dir(db_config_dir)

View File

@ -1,7 +1,7 @@
import os
from g import config_dir, DEFAULT_GID, DEFAULT_UID, templates_dir
from utils.misc import prepare_config_dir
from utils.misc import prepare_dir
from utils.jinja import render_jinja
job_config_dir = os.path.join(config_dir, "jobservice")
@ -10,15 +10,14 @@ job_service_conf_env = os.path.join(config_dir, "jobservice", "env")
job_service_conf_template_path = os.path.join(templates_dir, "jobservice", "config.yml.jinja")
jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")
def prepare_job_service(config_dict):
prepare_config_dir(job_config_dir)
prepare_dir(job_config_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
log_level = config_dict['log_level'].upper()
# Job log is stored in data dir
job_log_dir = os.path.join('/data', "job_logs")
prepare_config_dir(job_log_dir)
prepare_dir(job_log_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
# Render Jobservice env
render_jinja(
job_service_env_template_path,
@ -33,4 +32,4 @@ def prepare_job_service(config_dict):
gid=DEFAULT_GID,
max_job_workers=config_dict['max_job_workers'],
redis_url=config_dict['redis_url_js'],
level=log_level)
level=log_level)

View File

@ -1,7 +1,7 @@
import os
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_config_dir
from utils.misc import prepare_dir
from utils.jinja import render_jinja
log_config_dir = os.path.join(config_dir, "log")
@ -15,7 +15,7 @@ log_syslog_docker_template_path = os.path.join(templates_dir, 'log', 'rsyslog_do
log_syslog_docker_config = os.path.join(config_dir, 'log', 'rsyslog_docker.conf')
def prepare_log_configs(config_dict):
prepare_config_dir(log_config_dir)
prepare_dir(log_config_dir)
# Render Log config
render_jinja(

View File

@ -3,7 +3,7 @@ import string
import random
from g import DEFAULT_UID, DEFAULT_GID
from pathlib import Path
# To meet security requirement
# By default it will change file mode to 0600, and make the owner of the file to 10000:10000
@ -84,6 +84,26 @@ def prepare_config_dir(root, *name):
os.makedirs(absolute_path)
return absolute_path
def prepare_dir(root: str, *args, **kwargs) -> str:
    """Ensure the directory ``<root>/<args...>`` exists and return its path.

    Keyword args:
        mode: permission bits applied when the directory is newly created
            (default 0o755; subject to the process umask — NOTE(review): an
            already existing directory keeps its current mode).
        uid / gid: when either is given, chown the directory; whichever of
            the two is omitted keeps the directory's current owner/group.

    Raises:
        Exception: if the path already exists as a regular file.
    """
    uid = kwargs.get('uid')
    gid = kwargs.get('gid')
    target = Path(os.path.join(root, *args))
    if target.is_file():
        raise Exception('Path exists and the type is regular file')
    # "or" (not a .get default) so an explicit falsy mode also falls back
    target.mkdir(kwargs.get('mode') or 0o755, parents=True, exist_ok=True)
    if uid is not None or gid is not None:
        # Fill in the unspecified half from the directory's current ownership.
        st = target.stat()
        os.chown(
            target,
            st.st_uid if uid is None else uid,
            st.st_gid if gid is None else gid,
        )
    return str(target)
def delfile(src):
if os.path.isfile(src):

View File

@ -2,8 +2,8 @@ import os, shutil
from fnmatch import fnmatch
from pathlib import Path
from g import config_dir, templates_dir
from utils.misc import prepare_config_dir, mark_file
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_dir, mark_file
from utils.jinja import render_jinja
from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH
@ -17,12 +17,16 @@ CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS = 'harbor.https.*.conf'
CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP = 'harbor.http.*.conf'
def prepare_nginx(config_dict):
prepare_config_dir(nginx_confd_dir)
prepare_dir(nginx_confd_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
render_nginx_template(config_dict)
def render_nginx_template(config_dict):
if config_dict['protocol'] == "https":
render_jinja(nginx_https_conf_template, nginx_conf,
render_jinja(
nginx_https_conf_template,
nginx_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
ssl_cert=SSL_CERT_PATH,
ssl_cert_key=SSL_CERT_KEY_PATH)
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
@ -35,7 +39,9 @@ def render_nginx_template(config_dict):
else:
render_jinja(
nginx_http_conf_template,
nginx_conf)
nginx_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID)
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP
copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern)

View File

@ -2,7 +2,7 @@ import os, shutil, pathlib
from g import templates_dir, config_dir, root_crt_path, secret_key_dir,DEFAULT_UID, DEFAULT_GID
from .cert import openssl_installed, create_cert, create_root_cert, get_alias
from .jinja import render_jinja
from .misc import mark_file, prepare_config_dir
from .misc import mark_file, prepare_dir
notary_template_dir = os.path.join(templates_dir, "notary")
notary_signer_pg_template = os.path.join(notary_template_dir, "signer-config.postgres.json.jinja")
@ -20,12 +20,12 @@ notary_server_env_path = os.path.join(notary_config_dir, "server_env")
def prepare_env_notary(nginx_config_dir):
notary_config_dir = prepare_config_dir(config_dir, "notary")
notary_config_dir = prepare_dir(config_dir, "notary")
old_signer_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.crt'))
old_signer_key_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.key'))
old_signer_ca_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer-ca.crt'))
notary_secret_dir = prepare_config_dir('/secret/notary')
notary_secret_dir = prepare_dir('/secret/notary')
signer_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.crt'))
signer_key_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.key'))
signer_ca_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer-ca.crt'))
@ -72,9 +72,12 @@ def prepare_env_notary(nginx_config_dir):
print("Copying nginx configuration file for notary")
shutil.copy2(
render_jinja(
os.path.join(templates_dir, "nginx", "notary.upstream.conf.jinja"),
os.path.join(nginx_config_dir, "notary.upstream.conf"))
os.path.join(nginx_config_dir, "notary.upstream.conf"),
gid=DEFAULT_GID,
uid=DEFAULT_UID)
mark_file(os.path.join(notary_secret_dir, "notary-signer.crt"))
mark_file(os.path.join(notary_secret_dir, "notary-signer.key"))
@ -88,6 +91,8 @@ def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_pa
render_jinja(
notary_server_nginx_config_template,
os.path.join(nginx_config_dir, "notary.server.conf"),
gid=DEFAULT_GID,
uid=DEFAULT_UID,
ssl_cert=ssl_cert_path,
ssl_cert_key=ssl_cert_key_path)

View File

@ -0,0 +1,9 @@
import os

from g import data_dir, REDIS_UID, REDIS_GID
from utils.misc import prepare_dir

# Host-side directory where the Redis container persists its data.
redis_data_path = os.path.join(data_dir, 'redis')


def prepare_redis(config_dict):
    """Create the Redis data dir, owned by the in-container redis user.

    `config_dict` is accepted for signature parity with the other
    prepare_* helpers; it is not read here.
    """
    prepare_dir(redis_data_path, uid=REDIS_UID, gid=REDIS_GID)

View File

@ -1,7 +1,7 @@
import os, copy
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_config_dir
from utils.misc import prepare_dir
from utils.jinja import render_jinja
@ -11,7 +11,7 @@ registry_conf = os.path.join(config_dir, "registry", "config.yml")
def prepare_registry(config_dict):
prepare_config_dir(registry_config_dir)
prepare_dir(registry_config_dir)
storage_provider_info = get_storage_provider_info(
config_dict['storage_provider_name'],

View File

@ -1,7 +1,7 @@
import os, shutil
from g import config_dir, templates_dir
from utils.misc import prepare_config_dir
from utils.misc import prepare_dir
from utils.jinja import render_jinja
registryctl_config_dir = os.path.join(config_dir, "registryctl")
@ -24,7 +24,7 @@ def prepare_registry_ctl(config_dict):
copy_registry_ctl_conf(registryctl_config_template_path, registryctl_conf)
def prepare_registry_ctl_config_dir():
prepare_config_dir(registryctl_config_dir)
prepare_dir(registryctl_config_dir)
def copy_registry_ctl_conf(src, dst):
shutil.copyfile(src, dst)

View File

@ -4,11 +4,12 @@ RUN tdnf install -y redis sudo
VOLUME /var/lib/redis
WORKDIR /var/lib/redis
COPY ./make/photon/redis/docker-entrypoint.sh /usr/bin/
COPY ./make/photon/redis/docker-healthcheck /usr/bin/
COPY ./make/photon/redis/redis.conf /etc/redis.conf
RUN chmod +x /usr/bin/docker-entrypoint.sh \
RUN chmod +x /usr/bin/docker-healthcheck \
&& chown redis:redis /etc/redis.conf
ENTRYPOINT ["docker-entrypoint.sh"]
HEALTHCHECK CMD ["docker-healthcheck"]
USER redis
EXPOSE 6379
CMD ["redis-server", "/etc/redis.conf"]

View File

@ -1,13 +0,0 @@
#!/bin/sh
set -e
if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then
set -- redis-server "$@"
fi
if [ "$1" = 'redis-server' -a "$(id -u)" = '0' ]; then
chown -R redis .
exec sudo -u redis "$@"
fi
exec "$@"

View File

@ -0,0 +1,9 @@
#!/bin/bash
# Docker HEALTHCHECK probe for the Redis container.
# Healthy (exit 0) only when the local server answers PING with PONG.
set -eo pipefail
if ping="$(redis-cli -h "127.0.0.1" ping)" && [ "$ping" = 'PONG' ]; then
exit 0
fi
exit 1

View File

@ -46,6 +46,7 @@ config_dir=$harbor_prepare_path/common/config
# Run prepare script
docker run --rm -v $input_dir:/input:z \
-v $data_path:/data:z \
-v $harbor_prepare_path:/compose_location:z \
-v $config_dir:/config:z \
-v $secret_dir:/secret:z \