.gitignore  (vendored, 19 lines changed)
@@ -1,11 +1,14 @@
harbor
Deploy/config/registry/config.yml
Deploy/config/ui/env
Deploy/config/ui/app.conf
Deploy/config/db/env
Deploy/config/jobservice/env
Deploy/ui/harbor_ui
Deploy/jobservice/harbor_jobservice
ui/ui
make/common/config/registry/config.yml
make/common/config/ui/env
make/common/config/ui/app.conf
make/common/config/db/env
make/common/config/jobservice/env
make/common/config/nginx/nginx.conf
make/common/config/nginx/cert/*
make/dev/ui/harbor_ui
make/dev//jobservice/harbor_jobservice
src/ui/ui
src/jobservice/jobservice
*.pyc
jobservice/test
.travis.yml  (16 lines changed)
@@ -32,12 +32,11 @@ env:

before_install:
- sudo ./tests/hostcfg.sh
- cd Deploy
- sudo ./prepare
- cd ..
- sudo ./make/prepare

install:
- sudo apt-get update && sudo apt-get install -y libldap2-dev
- sudo apt-get install -y sqlite3
# - sudo apt-get remove -y mysql-common mysql-server-5.5 mysql-server-core-5.5 mysql-client-5.5 mysql-client-core-5.5
# - sudo apt-get autoremove -y
# - sudo apt-get install -y libaio1

@@ -70,23 +69,24 @@ install:

before_script:
# create tables and load data
# - mysql < ./Deploy/db/registry.sql -uroot --verbose
# - mysql < ./make/db/registry.sql -uroot --verbose
- sudo sqlite3 /registry.db < make/common/db/registry_sqlite.sql

script:
- sudo ./tests/testprepare.sh
- docker-compose -f Deploy/docker-compose.test.yml up -d
- docker-compose -f ./make/docker-compose.test.yml up -d
- go list ./... | grep -v -E 'vendor|tests' | xargs -L1 fgt golint
- go list ./... | grep -v -E 'vendor|tests' | xargs -L1 go vet
- export MYSQL_HOST=$IP
- export REGISTRY_URL=$IP:5000
- echo $REGISTRY_URL
- ./tests/pushimage.sh
- ./Deploy/coverage4gotest.sh
- ./tests/coverage4gotest.sh
- goveralls -coverprofile=profile.cov -service=travis-ci

- docker-compose -f Deploy/docker-compose.test.yml down
- docker-compose -f make/docker-compose.test.yml down

- docker-compose -f Deploy/docker-compose.yml up -d
- docker-compose -f make/dev/docker-compose.yml up -d

- docker ps
- go run tests/startuptest.go http://localhost/
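For contributors who want to run roughly the same checks locally before pushing, here is a minimal sketch assembled from the CI steps above. It assumes you run it from the repository root with sqlite3, fgt and golint installed, and it omits the coverage and image-push steps:

```sh
# Mirror the updated .travis.yml: prepare config, load the SQLite schema,
# bring up the test compose file, then lint and vet the Go packages.
sudo ./tests/hostcfg.sh
sudo ./make/prepare
sudo sqlite3 /registry.db < make/common/db/registry_sqlite.sql
docker-compose -f ./make/docker-compose.test.yml up -d
go list ./... | grep -v -E 'vendor|tests' | xargs -L1 fgt golint
go list ./... | grep -v -E 'vendor|tests' | xargs -L1 go vet
docker-compose -f make/docker-compose.test.yml down
```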
@@ -1,19 +0,0 @@
FROM library/ubuntu:14.04

# run logrotate hourly, disable imklog model, provides TCP/UDP syslog reception
RUN mv /etc/cron.daily/logrotate /etc/cron.hourly/ \
    && rm /etc/rsyslog.d/* \
    && rm /etc/rsyslog.conf
ADD rsyslog.conf /etc/rsyslog.conf

# logrotate configuration file for docker
ADD logrotate_docker.conf /etc/logrotate.d/

# rsyslog configuration file for docker
ADD rsyslog_docker.conf /etc/rsyslog.d/

VOLUME /var/log/docker/

EXPOSE 514

CMD cron && rsyslogd -n

@@ -1,35 +0,0 @@
# see "man logrotate" for details
# rotate log files weekly
weekly

# keep 4 weeks worth of backlogs
rotate 4

# create new (empty) log files after rotating old ones
create

# use date as a suffix of the rotated file
dateext

# uncomment this if you want your log files compressed
#compress

# RPM packages drop log rotation information into this directory
include /etc/logrotate.d

# no packages own wtmp and btmp -- we'll rotate them here
#/var/log/wtmp {
#    monthly
#    create 0664 root utmp
#    minsize 1M
#    rotate 1
#}

/var/log/btmp {
    missingok
    monthly
    create 0600 root utmp
    rotate 1
}

# system-specific logs may be also be configured here.

@@ -1,7 +0,0 @@
# Logrotate configuartion file for docker.

/var/log/docker/*/*.log {
    rotate 100
    size 10M
    copytruncate
}
Makefile  (new file, 362 lines)
@@ -0,0 +1,362 @@
# Makefile for Harbor project
#
# Targets:
#
# all:            prepare env, compile binaries, build images and install images
# prepare:        prepare env
# compile:        compile ui and jobservice code
# compile_golangimage:
#                 compile from the golang image
#                 for example: make compile_golangimage -e GOBUILDIMAGE= \
#                     reg-bj.eng.vmware.com/harborrelease/harborgo:1.6.2
# compile_ui, compile_jobservice: compile a specific binary
#
# build:          build Harbor docker images (default: build_photon)
#                 for example: make build -e BASEIMAGE=photon
# build_photon:   build Harbor docker images from the photon base image
# build_ubuntu:   build Harbor docker images from the ubuntu base image
#
# install:        compile binaries, build images, prepare the specific \
#                 version compose file and start up a Harbor instance
#
# start:          start up a Harbor instance
#
# down:           shut down a Harbor instance
#
# package_online:
#                 prepare the online install package
#                 for example: make package_online -e DEVFLAG=false \
#                     REGISTRYSERVER=reg-bj.eng.vmware.com \
#                     REGISTRYPROJECTNAME=harborrelease
#
# package_offline:
#                 prepare the offline install package
#
# pushimage:      push Harbor images to a specific registry server
#                 for example: make pushimage -e DEVFLAG=false REGISTRYUSER=admin \
#                     REGISTRYPASSWORD=***** \
#                     REGISTRYSERVER=reg-bj.eng.vmware.com/ \
#                     REGISTRYPROJECTNAME=harborrelease
#                 note**: REGISTRYSERVER needs a trailing "/". If this value \
#                 is not set, images will be pushed directly to Docker Hub.
#                     make pushimage -e DEVFLAG=false REGISTRYUSER=vmware \
#                         REGISTRYPASSWORD=***** \
#                         REGISTRYPROJECTNAME=vmware
#
# clean:          remove binaries, Harbor images, the specific version docker-compose \
#                 file, the specific version tag and the online/offline install packages
# cleanbinary:    remove ui and jobservice binaries
# cleanimage:     remove Harbor images
# cleandockercomposefile:
#                 remove the specific version docker-compose file
# cleanversiontag:
#                 remove the specific version tag
# cleanpackage:   remove the online/offline install packages
#
# all: install
#
# other examples:
#     clean the binaries and images of a specific version:
#         make clean -e VERSIONTAG=[TAG]
#     note**: if new code is committed to GitHub, the git commit TAG will \
#     change, so it is better to use this command to clean the images and \
#     files of the previous, specific TAG.
#     By default DEVFLAG=true; if you want to release a new version of Harbor, \
#     set the flag to false:
#         make XXXX -e DEVFLAG=false

SHELL := /bin/bash
BUILDPATH=$(CURDIR)
MAKEPATH=$(BUILDPATH)/make
MAKEDEVPATH=$(MAKEPATH)/dev
SRCPATH=./src
TOOLSPATH=$(BUILDPATH)/tools
GOBASEPATH=/go/src/github.com/vmware
CHECKENVCMD=checkenv.sh
BASEIMAGE=photon
COMPILETAG=compile_normal
REGISTRYSERVER=
REGISTRYPROJECTNAME=vmware
DEVFLAG=true

# docker parameters
DOCKERCMD=$(shell which docker)
DOCKERBUILD=$(DOCKERCMD) build
DOCKERRMIMAGE=$(DOCKERCMD) rmi
DOCKERPULL=$(DOCKERCMD) pull
DOCKERIMASES=$(DOCKERCMD) images
DOCKERSAVE=$(DOCKERCMD) save
DOCKERCOMPOSECMD=$(shell which docker-compose)
DOCKERTAG=$(DOCKERCMD) tag

# go parameters
GOCMD=$(shell which go)
GOBUILD=$(GOCMD) build
GOCLEAN=$(GOCMD) clean
GOINSTALL=$(GOCMD) install
GOTEST=$(GOCMD) test
GODEP=$(GOTEST) -i
GOFMT=gofmt -w
GOBUILDIMAGE=reg.mydomain.com/library/harborgo[:tag]
GOBUILDPATH=$(GOBASEPATH)/harbor
GOIMAGEBUILDCMD=/usr/local/go/bin/go
GOIMAGEBUILD=$(GOIMAGEBUILDCMD) build
GOBUILDPATH_UI=$(GOBUILDPATH)/src/ui
GOBUILDPATH_JOBSERVICE=$(GOBUILDPATH)/src/jobservice
GOBUILDMAKEPATH=$(GOBUILDPATH)/make
GOBUILDMAKEPATH_UI=$(GOBUILDMAKEPATH)/dev/ui
GOBUILDMAKEPATH_JOBSERVICE=$(GOBUILDMAKEPATH)/dev/jobservice

# binary
UISOURCECODE=$(SRCPATH)/ui
UIBINARYPATH=$(MAKEDEVPATH)/ui
UIBINARYNAME=harbor_ui
JOBSERVICESOURCECODE=$(SRCPATH)/jobservice
JOBSERVICEBINARYPATH=$(MAKEDEVPATH)/jobservice
JOBSERVICEBINARYNAME=harbor_jobservice

# prepare parameters
PREPAREPATH=$(TOOLSPATH)
PREPARECMD=prepare

# configfile
CONFIGPATH=$(MAKEPATH)
CONFIGFILE=harbor.cfg

# makefile
MAKEFILEPATH_PHOTON=$(MAKEPATH)/photon
MAKEFILEPATH_UBUNTU=$(MAKEPATH)/ubuntu

# common dockerfile
DOCKERFILEPATH_COMMON=$(MAKEPATH)/common
DOCKERFILEPATH_DB=$(DOCKERFILEPATH_COMMON)/db
DOCKERFILENAME_DB=Dockerfile

# docker image name
DOCKERIMAGENAME_UI=vmware/harbor-ui
DOCKERIMAGENAME_JOBSERVICE=vmware/harbor-jobservice
DOCKERIMAGENAME_LOG=vmware/harbor-log
DOCKERIMAGENAME_DB=vmware/harbor-db

# docker-compose files
DOCKERCOMPOSEFILEPATH=$(MAKEPATH)
DOCKERCOMPOSETPLFILENAME=docker-compose.tpl
DOCKERCOMPOSEFILENAME=docker-compose.yml

# version prepare
VERSIONFILEPATH=$(SRCPATH)/ui/views/sections
VERSIONFILENAME=header-content.htm
GITCMD=$(shell which git)
GITTAG=$(GITCMD) describe --tags
ifeq ($(DEVFLAG), true)
VERSIONTAG=dev
else
VERSIONTAG=$(shell $(GITTAG))
endif

SEDCMD=$(shell which sed)

# package
TARCMD=$(shell which tar)
ZIPCMD=$(shell which gzip)
DOCKERIMGFILE=harbor
HARBORPKG=harbor

# pushimage
PUSHSCRIPTPATH=$(MAKEPATH)
PUSHSCRIPTNAME=pushimage.sh
REGISTRYUSER=user
REGISTRYPASSWORD=default

version:
if [ "$(DEVFLAG)" = "false" ] ; then \
$(SEDCMD) -i 's/version=\"{{.Version}}\"/version=\"$(VERSIONTAG)\"/' -i $(VERSIONFILEPATH)/$(VERSIONFILENAME) ; \
fi

check_environment:
@$(MAKEPATH)/$(CHECKENVCMD)

compile_ui:
@echo "compiling binary for ui..."
$(GOBUILD) -o $(UIBINARYPATH)/$(UIBINARYNAME) $(UISOURCECODE)
@echo "Done."

compile_jobservice:
@echo "compiling binary for jobservice..."
$(GOBUILD) -o $(JOBSERVICEBINARYPATH)/$(JOBSERVICEBINARYNAME) $(JOBSERVICESOURCECODE)
@echo "Done."

compile_normal: compile_ui compile_jobservice

compile_golangimage:
@echo "pulling golang build base image"
$(DOCKERPULL) $(GOBUILDIMAGE)
@echo "Done."

@echo "compiling binary for ui (golang image)..."
@echo $(GOBASEPATH)
@echo $(GOBUILDPATH)
$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATH) -w $(GOBUILDPATH_UI) $(GOBUILDIMAGE) $(GOIMAGEBUILD) -v -o $(GOBUILDMAKEPATH_UI)/$(UIBINARYNAME)
@echo "Done."

@echo "compiling binary for jobservice (golang image)..."
$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATH) -w $(GOBUILDPATH_JOBSERVICE) $(GOBUILDIMAGE) $(GOIMAGEBUILD) -v -o $(GOBUILDMAKEPATH_JOBSERVICE)/$(JOBSERVICEBINARYNAME)
@echo "Done."

compile: check_environment $(COMPILETAG)

prepare:
@echo "preparing..."
$(MAKEPATH)/$(PREPARECMD) -conf $(CONFIGPATH)/$(CONFIGFILE)

build_common: version
@echo "building db container for photon..."
cd $(DOCKERFILEPATH_DB) && $(DOCKERBUILD) -f $(DOCKERFILENAME_DB) -t $(DOCKERIMAGENAME_DB):$(VERSIONTAG) .
@echo "Done."

build_photon: build_common
make -f $(MAKEFILEPATH_PHOTON)/Makefile build -e DEVFLAG=$(DEVFLAG)

build_ubuntu: build_common
make -f $(MAKEFILEPATH_UBUNTU)/Makefile build -e DEVFLAG=$(DEVFLAG)

build: build_$(BASEIMAGE)

modify_composefile:
@echo "preparing tag:$(VERSIONTAG) docker-compose file..."
@cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSETPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/image\: vmware.*/&:$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)

install: compile build prepare modify_composefile
@echo "loading harbor images..."
$(DOCKERCOMPOSECMD) -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME) up -d
@echo "Install complete. You can visit harbor now."

package_online: modify_composefile
@echo "packing online package ..."
@cp -r make $(HARBORPKG)
@if [ -n "$(REGISTRYSERVER)" ] ; then \
$(SEDCMD) -i 's/image\: vmware/image\: $(REGISTRYSERVER)\/$(REGISTRYPROJECTNAME)/' \
$(HARBORPKG)/docker-compose.$(VERSIONTAG).yml ; \
fi
@cp LICENSE $(HARBORPKG)/LICENSE
@cp NOTICE $(HARBORPKG)/NOTICE
@$(TARCMD) -zcvf harbor-online-installer-$(VERSIONTAG).tgz \
--exclude=$(HARBORPKG)/common/db --exclude=$(HARBORPKG)/ubuntu \
--exclude=$(HARBORPKG)/photon --exclude=$(HARBORPKG)/kubernetes \
--exclude=$(HARBORPKG)/dev --exclude=$(DOCKERCOMPOSETPLFILENAME) \
--exclude=$(HARBORPKG)/checkenv.sh \
--exclude=$(HARBORPKG)/jsminify.sh \
--exclude=$(HARBORPKG)/pushimage.sh \
$(HARBORPKG)

@rm -rf $(HARBORPKG)
@echo "Done."

package_offline: compile build modify_composefile
@echo "packing offline package ..."
@cp -r make $(HARBORPKG)

@cp LICENSE $(HARBORPKG)/LICENSE
@cp NOTICE $(HARBORPKG)/NOTICE

@echo "pulling nginx and registry..."
$(DOCKERPULL) registry:2.5.0
$(DOCKERPULL) nginx:1.9

@echo "saving harbor docker image"
$(DOCKERSAVE) -o $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tgz \
$(DOCKERIMAGENAME_UI):$(VERSIONTAG) \
$(DOCKERIMAGENAME_LOG):$(VERSIONTAG) \
$(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) \
nginx:1.9 registry:2.5.0

@$(TARCMD) -zcvf harbor-offline-installer-$(VERSIONTAG).tgz \
--exclude=$(HARBORPKG)/common/db --exclude=$(HARBORPKG)/ubuntu \
--exclude=$(HARBORPKG)/photon --exclude=$(HARBORPKG)/kubernetes \
--exclude=$(HARBORPKG)/dev --exclude=$(DOCKERCOMPOSETPLFILENAME) \
--exclude=$(HARBORPKG)/checkenv.sh \
--exclude=$(HARBORPKG)/jsminify.sh \
--exclude=$(HARBORPKG)/pushimage.sh \
$(HARBORPKG)

@rm -rf $(HARBORPKG)
@echo "Done."

pushimage:
@echo "pushing harbor images ..."
$(DOCKERTAG) $(DOCKERIMAGENAME_UI):$(VERSIONTAG) $(REGISTRYSERVER)$(DOCKERIMAGENAME_UI):$(VERSIONTAG)
$(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(REGISTRYSERVER)$(DOCKERIMAGENAME_UI):$(VERSIONTAG) \
$(REGISTRYUSER) $(REGISTRYPASSWORD) $(REGISTRYSERVER)
$(DOCKERRMIMAGE) $(REGISTRYSERVER)$(DOCKERIMAGENAME_UI):$(VERSIONTAG)

@$(DOCKERTAG) $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) $(REGISTRYSERVER)$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG)
@$(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(REGISTRYSERVER)$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) \
$(REGISTRYUSER) $(REGISTRYPASSWORD) $(REGISTRYSERVER)
@$(DOCKERRMIMAGE) $(REGISTRYSERVER)$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG)

@$(DOCKERTAG) $(DOCKERIMAGENAME_LOG):$(VERSIONTAG) $(REGISTRYSERVER)$(DOCKERIMAGENAME_LOG):$(VERSIONTAG)
@$(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(REGISTRYSERVER)$(DOCKERIMAGENAME_LOG):$(VERSIONTAG) \
$(REGISTRYUSER) $(REGISTRYPASSWORD) $(REGISTRYSERVER)
@$(DOCKERRMIMAGE) $(REGISTRYSERVER)$(DOCKERIMAGENAME_LOG):$(VERSIONTAG)

@$(DOCKERTAG) $(DOCKERIMAGENAME_DB):$(VERSIONTAG) $(REGISTRYSERVER)$(DOCKERIMAGENAME_DB):$(VERSIONTAG)
@$(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(REGISTRYSERVER)$(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
$(REGISTRYUSER) $(REGISTRYPASSWORD) $(REGISTRYSERVER)
@$(DOCKERRMIMAGE) $(REGISTRYSERVER)$(DOCKERIMAGENAME_DB):$(VERSIONTAG)

start:
@echo "loading harbor images..."
@$(DOCKERCOMPOSECMD) -f $(DOCKERCOMPOSEFILEPATH)/docker-compose.$(VERSIONTAG).yml up -d
@echo "Start complete. You can visit harbor now."

down:
@echo "stopping harbor instance..."
@$(DOCKERCOMPOSECMD) -f $(DOCKERCOMPOSEFILEPATH)/docker-compose.yml down
@echo "Done."

cleanbinary:
@echo "cleaning binary..."
@if [ -f $(UIBINARYPATH)/$(UIBINARYNAME) ] ; then rm $(UIBINARYPATH)/$(UIBINARYNAME) ; fi
@if [ -f $(JOBSERVICEBINARYPATH)/$(JOBSERVICEBINARYNAME) ] ; then rm $(JOBSERVICEBINARYPATH)/$(JOBSERVICEBINARYNAME) ; fi

cleanimage:
@echo "cleaning image for photon..."
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_UI):$(VERSIONTAG)
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_DB):$(VERSIONTAG)
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG)
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_LOG):$(VERSIONTAG)
#- $(DOCKERRMIMAGE) -f registry:2.5.0
#- $(DOCKERRMIMAGE) -f nginx:1.9

cleandockercomposefile:
@echo "cleaning $(DOCKERCOMPOSEFILEPATH)/docker-compose.$(VERSIONTAG).yml"
@if [ -f $(DOCKERCOMPOSEFILEPATH)/docker-compose.$(VERSIONTAG).yml ] ; then rm $(DOCKERCOMPOSEFILEPATH)/docker-compose.$(VERSIONTAG).yml ; fi

cleanversiontag:
@echo "cleaning version TAG"
@$(SEDCMD) -i 's/version=\"$(VERSIONTAG)\"/version=\"{{.Version}}\"/' -i $(VERSIONFILEPATH)/$(VERSIONFILENAME)

cleanpackage:
@echo "cleaning harbor install package"
@if [ -d $(BUILDPATH)/harbor ] ; then rm -rf $(BUILDPATH)/harbor ; fi
@if [ -f $(BUILDPATH)/harbor-online-installer-$(VERSIONTAG).tgz ] ; \
then rm $(BUILDPATH)/harbor-online-installer-$(VERSIONTAG).tgz ; fi
@if [ -f $(BUILDPATH)/harbor-offline-installer-$(VERSIONTAG).tgz ] ; \
then rm $(BUILDPATH)/harbor-offline-installer-$(VERSIONTAG).tgz ; fi

.PHONY: cleanall
cleanall: cleanbinary cleanimage cleandockercomposefile cleanversiontag cleanpackage

clean:
@echo "  make cleanall:               remove binaries, Harbor images, the specific version docker-compose"
@echo "                               file, the specific version tag, and the online and offline install packages"
@echo "  make cleanbinary:            remove ui and jobservice binaries"
@echo "  make cleanimage:             remove Harbor images"
@echo "  make cleandockercomposefile: remove the specific version docker-compose file"
@echo "  make cleanversiontag:        remove the specific version tag"
@echo "  make cleanpackage:           remove the online and offline install packages"

all: install
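For quick reference, a few example invocations of the targets documented in the Makefile header above. The registry server, project name and credentials shown are illustrative placeholders, not values required by the Makefile:

```sh
# Compile binaries, build images, then bring up a Harbor instance (DEVFLAG defaults to true).
make install

# Compile the binaries inside a golang image instead of a local Go toolchain.
make compile_golangimage -e GOBUILDIMAGE=reg-bj.eng.vmware.com/harborrelease/harborgo:1.6.2

# Prepare a release-style offline installer with a git-tag version instead of "dev".
make package_offline -e DEVFLAG=false

# Push images to a private registry; note the trailing "/" on REGISTRYSERVER.
make pushimage -e DEVFLAG=false REGISTRYUSER=admin REGISTRYPASSWORD=secret \
    REGISTRYSERVER=reg.mydomain.com/ REGISTRYPROJECTNAME=myproject

# Clean the binaries, images and packages of a specific version tag.
make clean -e VERSIONTAG=dev
```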
README.md  (15 lines changed)
@@ -29,12 +29,19 @@ On an Internet connected host, Harbor can be easily installed via docker-compose
```sh
$ git clone https://github.com/vmware/harbor
```
2. Edit the file **Deploy/harbor.cfg**, make necessary configuration changes such as hostname, admin password and mail server. Refer to [Installation and Configuration Guide](docs/installation_guide.md) for more info.
2. Edit the file **make/harbor.cfg**, make necessary configuration changes such as hostname, admin password and mail server. Refer to [Installation and Configuration Guide](docs/installation_guide.md) for more info.

3. Install Harbor with the following commands. Note that the docker-compose process can take a while.
3. Install Harbor with one of the following methods. Note that building the container images can take a while.

I: Automated Install
```sh
$ cd Deploy
$ make install
```

II: Manual Install
```sh
$ cd make

$ ./prepare
Generated configuration file: ./config/ui/env

@@ -42,6 +49,8 @@ On an Internet connected host, Harbor can be easily installed via docker-compose
Generated configuration file: ./config/registry/config.yml
Generated configuration file: ./config/db/env

$ cd dev

$ docker-compose up -d
```
@@ -1,32 +0,0 @@
package api

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/vmware/harbor/tests/apitests/apilib"
)

func TestSearch(t *testing.T) {
    fmt.Println("Testing Search(SearchGet) API")
    assert := assert.New(t)

    apiTest := newHarborAPI()
    var result apilib.Search
    result, err := apiTest.SearchGet("library")
    //fmt.Printf("%+v\n", result)
    if err != nil {
        t.Error("Error while search project or repository", err.Error())
        t.Log(err)
    } else {
        assert.Equal(result.Projects[0].Id, int64(1), "Project id should be equal")
        assert.Equal(result.Projects[0].Name, "library", "Project name should be library")
        assert.Equal(result.Projects[0].Public, int32(1), "Project public status should be 1 (true)")
        //t.Log(result)
    }
    //if result.Response.StatusCode != 200 {
    //    t.Log(result.Response)
    //}

}

@@ -1,9 +0,0 @@
package auth

import (
    "testing"
)

func TestMain(t *testing.T) {
}

@@ -1,11 +0,0 @@
package controllers

// AccountSettingController handles request to /account_setting
type AccountSettingController struct {
    BaseController
}

// Get renders the account settings page
func (asc *AccountSettingController) Get() {
    asc.Forward("page_title_account_setting", "account-settings.htm")
}

@@ -1,11 +0,0 @@
package controllers

// AdminOptionController handles requests to /admin_option
type AdminOptionController struct {
    BaseController
}

// Get renders the admin options page
func (aoc *AdminOptionController) Get() {
    aoc.Forward("page_title_admin_option", "admin-options.htm")
}

@@ -1,11 +0,0 @@
package controllers

// ChangePasswordController handles request to /change_password
type ChangePasswordController struct {
    BaseController
}

// Get renders the change password page
func (asc *ChangePasswordController) Get() {
    asc.Forward("page_title_change_password", "change-password.htm")
}
dao/base.go  (96 lines changed)
@@ -1,96 +0,0 @@
/*
   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package dao

import (
    "fmt"
    "net"

    "os"
    "sync"
    "time"

    "github.com/astaxie/beego/orm"
    _ "github.com/go-sql-driver/mysql" //register mysql driver
    "github.com/vmware/harbor/utils/log"
)

// NonExistUserID : if a user does not exist, the ID of the user will be 0.
const NonExistUserID = 0

// GenerateRandomString generates a random string
func GenerateRandomString() (string, error) {
    o := orm.NewOrm()
    var uuid string
    err := o.Raw(`select uuid() as uuid`).QueryRow(&uuid)
    if err != nil {
        return "", err
    }
    return uuid, nil

}

//InitDB initializes the database
func InitDB() {
    // orm.Debug = true
    orm.RegisterDriver("mysql", orm.DRMySQL)
    addr := os.Getenv("MYSQL_HOST")
    port := os.Getenv("MYSQL_PORT")
    username := os.Getenv("MYSQL_USR")
    password := os.Getenv("MYSQL_PWD")

    log.Debugf("db url: %s:%s, db user: %s", addr, port, username)
    dbStr := username + ":" + password + "@tcp(" + addr + ":" + port + ")/registry"
    ch := make(chan int, 1)
    go func() {
        var err error
        var c net.Conn
        for {
            c, err = net.DialTimeout("tcp", addr+":"+port, 20*time.Second)
            if err == nil {
                c.Close()
                ch <- 1
            } else {
                log.Errorf("failed to connect to db, retry after 2 seconds :%v", err)
                time.Sleep(2 * time.Second)
            }
        }
    }()
    select {
    case <-ch:
    case <-time.After(60 * time.Second):
        panic("Failed to connect to DB after 60 seconds")
    }
    err := orm.RegisterDataBase("default", "mysql", dbStr)
    if err != nil {
        panic(err)
    }
}

var globalOrm orm.Ormer
var once sync.Once

// GetOrmer :set ormer singleton
func GetOrmer() orm.Ormer {
    once.Do(func() {
        globalOrm = orm.NewOrm()
    })
    return globalOrm
}

func paginateForRawSQL(sql string, limit, offset int64) string {
    return fmt.Sprintf("%s limit %d offset %d", sql, limit, offset)
}
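The deleted dao/base.go above reads its connection settings from environment variables inside InitDB() and always connects to the `registry` database. A minimal sketch of the variables that code expected; the values shown are illustrative only (.travis.yml sets MYSQL_HOST from $IP):

```sh
# Environment read via os.Getenv in the removed dao/base.go.
export MYSQL_HOST=127.0.0.1   # illustrative host
export MYSQL_PORT=3306        # illustrative port
export MYSQL_USR=root         # illustrative user
export MYSQL_PWD=changeit     # illustrative password, not a project default
```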
@@ -14,7 +14,7 @@ Configure security connection between Harbor and Docker client.
[Upgrade and Data Migration Guide](migration_guide.md)
Data migration may be needed when upgrading Harbor to a newer version.

[Deploy Harbor on Kubernetes](kubernetes_deployment.md)
Guide to deploy Harbor on Kubernetes. (maintained by community)

### Developer documents

@@ -31,7 +31,7 @@ How to add your local language to Harbor.

[Python SDK](../contrib/sdk/harbor-py) (by community)

[Deploying Harbor using Docker Machine](../contrib/deploying_using_docker_machine.md) (by community)

[Configuring Harbor as a local registry mirror](../contrib/Configure_mirror.md) (by community)

@@ -51,7 +51,7 @@ How to add your local language to Harbor.

[Overall Architecture of Harbor Registry](http://www.compare-review-information.com/overall-architecture-of-harbor-registry/)

[Deploying a Private Secured Docker Registry in 15 Minutes](http://alexanderzeitler.com/articles/deploying-a-private-secured-docker-registry-within-15-minutes/)

[Docker Private Registry Using Harbor](https://blog.imaginea.com/docker-private-registry-using-harbor-2/)
@@ -45,9 +45,9 @@ If you're using **IP** to connect your registry host, you may instead run the co
  openssl ca -in yourdomain.com.csr -out yourdomain.com.crt -cert ca.crt -keyfile ca.key -extfile extfile.cnf -outdir .
  ```
##Configuration of Nginx
After obtaining the **yourdomain.com.crt** and **yourdomain.com.key** files, change the directory to Deploy/config/nginx in the Harbor project.
After obtaining the **yourdomain.com.crt** and **yourdomain.com.key** files, change the directory to make/config/nginx in the Harbor project.
```
cd Deploy/config/nginx
cd make/config/nginx
```
Create a new directory cert/, if it does not exist. Then copy **yourdomain.com.crt** and **yourdomain.com.key** to cert/, e.g.:
```

@@ -87,7 +87,7 @@ Then look for the SSL section to make sure the files of your certificates match
Save your changes in nginx.conf.

##Installation of Harbor
Next, edit the file Deploy/harbor.cfg and update the hostname and the protocol:
Next, edit the file make/harbor.cfg and update the hostname and the protocol:
```
#set hostname
hostname = reg.yourdomain.com
@@ -34,9 +34,9 @@ From time to time, you may need to manually test Harbor REST API. You can deplo
  ```sh
  ./prepare-swagger.sh
  ```
* Change the directory to _Deploy_
* Change the directory to _make_
  ```sh
  cd ../Deploy
  cd ../make
  ```
* Edit the _docker-compose.yml_ file.
  ```sh

@@ -51,8 +51,8 @@ ui:
    - ./config/ui/app.conf:/etc/ui/app.conf
    - ./config/ui/private_key.pem:/etc/ui/private_key.pem
    ## add two lines as below ##
    - ../static/vendors/swagger-ui-2.1.4/dist:/go/bin/static/vendors/swagger
    - ../static/resources/yaml/swagger.yaml:/go/bin/static/resources/yaml/swagger.yaml
    - ../../src/ui/static/vendors/swagger-ui-2.1.4/dist:/go/bin/static/vendors/swagger
    - ../../src/ui/static/resources/yaml/swagger.yaml:/go/bin/static/resources/yaml/swagger.yaml
  ...
  ```
* Rebuild Harbor project
@@ -50,7 +50,7 @@ $ cp /root/cert/private_key.pem private_key.pem
$ cp /root/cert/root.crt ../registry/root.crt
```

5. After these steps, go back to the Deploy directory; you can start Harbor using the following command:
5. After these steps, go back to the make directory; you can start Harbor using the following command:
```
$ docker-compose up -d
```
@@ -45,14 +45,14 @@

5. Add the new language to the `app.conf` file.

   In the file `Deploy/config/ui/app.conf`, append a new item to the configuration section.
   In the file `make/config/ui/app.conf`, append a new item to the configuration section.
   ```
   [lang]
   types = en-US|zh-CN|<language>-<locale>
   names = en-US|zh-CN|<language>-<locale>
   ```

6. Next, change to the `Deploy/` directory, then rebuild and restart Harbor with the commands below:
6. Next, change to the `make/` directory, then rebuild and restart Harbor with the commands below:
   ```
   docker-compose down
   docker-compose up --build -d
@@ -1,11 +0,0 @@
### A faster way to pull images for Chinese Harbor users
By default, Harbor not only build images according to Dockerfile but also pull images from Docker Hub. For the reason we all know, it is difficult for Chinese Harbor users to pull images from the Docker Hub. We put images on daocloud.io platform, we'll put images on other platforms later. If you have difficulty to pull images from Docker Hub, or you think it wastes too much time to build images. We recommend you to use the following way to accelerate the pulling procedure(make sure you're in the harbor diectory):
```
$ cd contrib
$ cp docker-compose.yml.daocloud ../Deploy
$ cd ../Deploy
$ mv docker-compose.yml docker-compose.yml.bak
$ mv docker-compose.yml.daocloud docker-compose.yml
$ docker-compose up -d
```
Then you'll see docker pulling imges faster than before.
docs/img/ova/edit_settings.png  (new binary file, 126 KiB)
docs/img/ova/ova01.png  (new binary file, 137 KiB)
docs/img/ova/ova02.png  (new binary file, 24 KiB)
docs/img/ova/ova03.png  (new binary file, 30 KiB)
docs/img/ova/ova04.png  (new binary file, 44 KiB)
docs/img/ova/ova05.png  (new binary file, 47 KiB)
docs/img/ova/ova06.png  (new binary file, 41 KiB)
docs/img/ova/ova07.png  (new binary file, 46 KiB)
docs/img/ova/ova08.png  (new binary file, 40 KiB)
docs/img/ova/vapp_options.png  (new binary file, 40 KiB)
@@ -7,7 +7,7 @@ Harbor can be installed by one of two installers:

Both installers can be downloaded from the [release page](https://github.com/vmware/harbor/releases). The installation process of both installers is the same; this guide describes the steps to install and configure Harbor.

In addition, deployment instructions for Kubernetes have been created by the community. Refer to [Deploy Harbor on Kubernetes](kubernetes_deployment.md) for details.

## Prerequisites for the target host
Harbor is deployed as several Docker containers, and, therefore, can be deployed on any Linux distribution that supports Docker. The target host requires Python, Docker, and Docker Compose to be installed.

@@ -281,7 +281,7 @@ $ sudo install.sh
If a container is not in **UP** state, check the log file of that container in directory ```/var/log/harbor```. For example, if the container ```harbor_ui_1``` is not running, you should look at the log file ```docker_ui.log```.

2. When setting up Harbor behind an nginx proxy or elastic load balancing, look for the line below in `Deploy/config/nginx/nginx.conf` and remove it from the sections `location /`, `location /v2/` and `location /service/` if the proxy already has similar settings:
2. When setting up Harbor behind an nginx proxy or elastic load balancing, look for the line below in `make/config/nginx/nginx.conf` and remove it from the sections `location /`, `location /v2/` and `location /service/` if the proxy already has similar settings:
```
proxy_set_header X-Forwarded-Proto $scheme;
```
docs/installation_guide_ova.md  (new file, 86 lines)
@@ -0,0 +1,86 @@
# Install and Configure Harbor on vSphere using OVA
This guide walks you through the steps to install and configure Harbor on vSphere using the OVA.

## Installation
1. Get the URL of the OVA file, or download it to your local disk, from the [release page](https://github.com/vmware/harbor/releases).

2. Log in to the vSphere web client. Right-click the datacenter, cluster or host on which Harbor will be deployed. Select "Deploy OVF Template" to open the import wizard.

![ova](img/ova/ova01.png)

3. Paste the URL of the OVA file, or select it from your local disk, and click "Next".

![ova](img/ova/ova02.png)

4. Review the OVF template details and click "Next".

![ova](img/ova/ova03.png)

5. Specify a name and location for the deployed template.

![ova](img/ova/ova04.png)

6. Select the storage and virtual disk format, then click "Next".

![ova](img/ova/ova05.png)

7. Configure the networks the deployed template should use.

![ova](img/ova/ova06.png)

8. Customize the properties of Harbor. The properties are described below. At the very least, you need to set the **Root Password**, **Harbor Admin Password** and **Database Password** properties.

![ova](img/ova/ova07.png)

* Application
    * **Root Password**: The password of the root user. (8-128 characters)
    * **Harbor Admin Password**: The initial password of the Harbor admin. It only applies the first time Harbor starts and has no effect after the first launch. Change the admin password from the UI after launching Harbor. (8-20 characters)
    * **Database Password**: The password of the root user of the MySQL database. (8-128 characters)
    * **Authentication Mode**: The default authentication mode is db_auth, i.e. the credentials are stored in a local database. Set it to ldap_auth if you want to verify the user's credentials against an LDAP/AD server.
    * **LDAP URL**: The URL of an LDAP/AD server.
    * **LDAP Search DN**: The DN of a user who has permission to search the LDAP/AD server. If your LDAP/AD server does not support anonymous search, you should configure this DN and the LDAP Search Password.
    * **LDAP Search Password**: The password of the user for LDAP search.
    * **LDAP Base DN**: The base DN from which to look up a user in LDAP/AD.
    * **LDAP UID**: The attribute used in a search to match a user; it could be uid, cn, email, sAMAccountName or another attribute, depending on your LDAP/AD server.
    * **Email Server**: The mail server used to send password reset emails.
    * **Email Server Port**: The port of the mail server.
    * **Email Username**: The user from whom the password reset email is sent.
    * **Email Password**: The password of the user from whom the password reset email is sent.
    * **Email From**: The name of the email sender.
    * **Email SSL**: Whether to enable secure mail transmission.
    * **SSL Cert**: Paste in the content of a certificate file. If SSL Cert and SSL Cert Key are both set, HTTPS will be used.
    * **SSL Cert Key**: Paste in the content of the certificate key file. If SSL Cert and SSL Cert Key are both set, HTTPS will be used.
    * **Self Registration**: Determines whether self-registration is allowed when the authentication mode is database. Set this to off to disable user self-registration in Harbor.
    * **Verify Remote Cert**: Determines whether image replication should verify the SSL certificate when it connects to a remote registry. Set this flag to off when the remote registry uses a self-signed or untrusted certificate.
    * **Garbage Collection**: When set to true, Harbor performs garbage collection every time it boots up.

* Networking properties
    * **Default Gateway**: The default gateway address for this VM. Leave blank if DHCP is desired.
    * **Domain Name**: The domain name of this VM. Leave blank if DHCP is desired.
    * **Domain Search Path**: The domain search path (comma or space separated domain names) for this VM. Leave blank if DHCP is desired.
    * **Domain Name Servers**: The domain name server IP addresses for this VM (comma separated). Leave blank if DHCP is desired.
    * **Network 1 IP Address**: The IP address of this interface. Leave blank if DHCP is desired.
    * **Network 1 Netmask**: The netmask or prefix for this interface. Leave blank if DHCP is desired.

**Notes:** If you want to enable HTTPS with a self-signed certificate and have no idea how to generate it, refer to the "Getting a certificate" part of this [guide](https://github.com/vmware/harbor/blob/master/docs/configure_https.md#getting-a-certificate).

After you complete the properties, click "Next".

9. Review your settings and click "Finish" to complete the installation.

![ova](img/ova/ova08.png)

## Reconfiguration
If you want to reconfigure the properties of Harbor, follow these steps:
1. Power off the VM on which Harbor is deployed.
2. Right-click the VM and select "Edit Settings".

![ova](img/ova/edit_settings.png)

3. Click the "vApp Options" tab, reconfigure the properties and click "OK".

![ova](img/ova/vapp_options.png)

4. Power on the VM.

**Notes:** "Harbor Admin Password" and the networking properties cannot be modified with this method after Harbor has launched. Change the admin password from the UI, and change the networking properties manually at the OS level.
@@ -1,4 +1,4 @@
## Deploying Harbor on Kubernetes
To deploy Harbor on Kubernetes, it requires some additional steps because
1. When the Harbor registry uses https, we need a cert or a workaround to avoid errors like this:
```

@@ -24,13 +24,13 @@ To deploy Harbor on Kubernetes, it requires some additional steps because
- Rebuild the registry image with the service IP after the service is created and use ```kubectl rolling-update``` to update to the new image.

To start Harbor on Kubernetes, you first need to build the docker images. The docker images for deploying Harbor on Kubernetes depend on the docker images used to deploy Harbor with docker-compose, so the first step is to build the docker images with docker-compose. Before actually building the images, you need to first adjust the [configuration](https://github.com/vmware/harbor/blob/master/Deploy/harbor.cfg):
- Change the [hostname](https://github.com/vmware/harbor/blob/master/Deploy/harbor.cfg#L5) to ```localhost```
- Adjust the [email settings](https://github.com/vmware/harbor/blob/master/Deploy/harbor.cfg#L11) according to your needs.
To start Harbor on Kubernetes, you first need to build the docker images. The docker images for deploying Harbor on Kubernetes depend on the docker images used to deploy Harbor with docker-compose, so the first step is to build the docker images with docker-compose. Before actually building the images, you need to first adjust the [configuration](https://github.com/vmware/harbor/blob/master/make/harbor.cfg):
- Change the [hostname](https://github.com/vmware/harbor/blob/master/make/harbor.cfg#L5) to ```localhost```
- Adjust the [email settings](https://github.com/vmware/harbor/blob/master/make/harbor.cfg#L11) according to your needs.

Then you can run the following commands to build the docker images:
```
cd Deploy
cd make
./prepare
docker-compose build
docker build -f kubernetes/dockerfiles/proxy-dockerfile -t {your_account}/proxy .

@@ -45,21 +45,21 @@ docker push {your_account}/deploy_mysql

where "your_account" is your own registry. Then you need to update the "image" field in the ```*-rc.yaml``` files at:
```
Deploy/kubernetes/mysql-rc.yaml
Deploy/kubernetes/proxy-rc.yaml
Deploy/kubernetes/registry-rc.yaml
Deploy/kubernetes/ui-rc.yaml
make/kubernetes/mysql-rc.yaml
make/kubernetes/proxy-rc.yaml
make/kubernetes/registry-rc.yaml
make/kubernetes/ui-rc.yaml
```

Furthermore, the following configuration can be changed according to your needs:
- **harbor_admin_password**: The password for the administrator of Harbor; by default the password is Harbor12345. You can change it [here](https://github.com/vmware/harbor/blob/master/Deploy/kubernetes/ui-rc.yaml#L36).
- **auth_mode**: The authentication mode of Harbor. By default it is *db_auth*, i.e. the credentials are stored in a database. Please set it to *ldap_auth* if you want to verify user's credentials against an LDAP server. You can change the configuration [here](https://github.com/vmware/harbor/blob/master/Deploy/kubernetes/ui-rc.yaml#L40).
- **ldap_url**: The URL for LDAP endpoint, for example ldaps://ldap.mydomain.com. It is only used when **auth_mode** is set to *ldap_auth*. It could be changed [here](https://github.com/vmware/harbor/blob/master/Deploy/kubernetes/ui-rc.yaml#L42).
- **ldap_basedn**: The basedn template for verifying the user's credentials against LDAP, for example uid=%s,ou=people,dc=mydomain,dc=com. It is only used when **auth_mode** is set to *ldap_auth*. It could be changed [here](https://github.com/vmware/harbor/blob/master/Deploy/kubernetes/ui-rc.yaml#L44).
- **db_password**: The password of the root user of the MySQL database. Change this password for any production use. You need to change both [here](https://github.com/vmware/harbor/blob/master/Deploy/kubernetes/ui-rc.yaml#L28) and [here](https://github.com/vmware/harbor/blob/master/Deploy/harbor.cfg#L32) to make the change. Please note, you need to change the ```harbor.cfg``` before building the docker images.
- **harbor_admin_password**: The password for the administrator of Harbor; by default the password is Harbor12345. You can change it [here](https://github.com/vmware/harbor/blob/master/make/kubernetes/ui-rc.yaml#L36).
- **auth_mode**: The authentication mode of Harbor. By default it is *db_auth*, i.e. the credentials are stored in a database. Please set it to *ldap_auth* if you want to verify user's credentials against an LDAP server. You can change the configuration [here](https://github.com/vmware/harbor/blob/master/make/kubernetes/ui-rc.yaml#L40).
- **ldap_url**: The URL for LDAP endpoint, for example ldaps://ldap.mydomain.com. It is only used when **auth_mode** is set to *ldap_auth*. It could be changed [here](https://github.com/vmware/harbor/blob/master/make/kubernetes/ui-rc.yaml#L42).
- **ldap_basedn**: The basedn template for verifying the user's credentials against LDAP, for example uid=%s,ou=people,dc=mydomain,dc=com. It is only used when **auth_mode** is set to *ldap_auth*. It could be changed [here](https://github.com/vmware/harbor/blob/master/make/kubernetes/ui-rc.yaml#L44).
- **db_password**: The password of the root user of the MySQL database. Change this password for any production use. You need to change both [here](https://github.com/vmware/harbor/blob/master/make/kubernetes/ui-rc.yaml#L28) and [here](https://github.com/vmware/harbor/blob/master/make/harbor.cfg#L32) to make the change. Please note, you need to change the ```harbor.cfg``` before building the docker images.

Finally you can start the jobs by running:
```
kubectl create -f Deploy/kubernetes
kubectl create -f make/kubernetes
```
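Condensing the Kubernetes steps above into one sequence, as a sketch only; {your_account} is the same placeholder the guide uses, and the remaining images must be built and pushed the same way:

```sh
cd make
./prepare
docker-compose build
# Build one of the Kubernetes-specific images as the guide shows; repeat for the others.
docker build -f kubernetes/dockerfiles/proxy-dockerfile -t {your_account}/proxy .
docker push {your_account}/proxy
# After updating the image fields in make/kubernetes/*-rc.yaml:
kubectl create -f make/kubernetes
```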
@@ -12,7 +12,7 @@ When upgrading your existing Harbor instance to a newer version, you may need to
1. Log in to the machine that Harbor runs on, then stop and remove the existing Harbor service if it is still running:

```
cd Deploy/
cd make/
docker-compose down
```

@@ -28,11 +28,11 @@ When upgrading your existing Harbor instance to a newer version, you may need to
```

4. Before upgrading Harbor, perform database migration first.
The directory **migration/** contains the tool for migration. The first step is to update the values of `db_username`, `db_password`, `db_port`, `db_name` in **migration.cfg** so that they match your system's configuration.
The directory **tools/migration/** contains the tool for migration. The first step is to update the values of `db_username`, `db_password`, `db_port`, `db_name` in **migration.cfg** so that they match your system's configuration.

5. The migration tool is delivered as a container, so you should build the image from its Dockerfile:
```
cd migration/
cd tools/migration/

docker build -t migrate-tool .
```

@@ -49,11 +49,11 @@ The directory **migration/** contains the tool for migration. The first step is
docker run -ti --rm -v /data/database:/var/lib/mysql migrate-tool up head
```

8. Change to the `Deploy/` directory and configure Harbor by modifying the file `harbor.cfg`; you may need to refer to the configuration files you backed up during step 2. Refer to [Installation & Configuration Guide](../docs/installation_guide.md) for more info.
8. Change to the `make/` directory and configure Harbor by modifying the file `harbor.cfg`; you may need to refer to the configuration files you backed up during step 2. Refer to [Installation & Configuration Guide](../docs/installation_guide.md) for more info.

9. If HTTPS has been enabled for Harbor before, restore the `nginx.conf` and key/certificate files from the backup files in Step 2. Refer to [Configuring Harbor with HTTPS Access](../docs/configure_https.md) for more info.

10. Under the directory `Deploy/`, run the `./prepare` script to generate the necessary config files.
10. Under the directory `make/`, run the `./prepare` script to generate the necessary config files.

11. Rebuild Harbor and restart the registry service

@@ -67,7 +67,7 @@ For any reason, if you want to roll back to the previous version of Harbor, foll
1. Stop and remove the current Harbor service if it is still running.

```
cd Deploy/
cd make/
docker-compose down
```
2. Restore the database from the backup file in `/path/to/backup`.

@@ -88,7 +88,7 @@ For any reason, if you want to roll back to the previous version of Harbor, foll

5. Restart the Harbor service using the previous configuration.
```sh
cd Deploy/
cd make/
docker-compose up --build -d
```
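As a summary of the upgrade path described above, a condensed sketch of the migration sequence; /path/to/backup is an example location, and harbor.cfg plus any HTTPS files should be reviewed before the final step:

```sh
cd make/
docker-compose down                       # stop the running instance
cp -r /data/database /path/to/backup      # back up the MySQL data first
cd ../tools/migration/
# set db_username, db_password, db_port, db_name in migration.cfg, then:
docker build -t migrate-tool .
docker run -ti --rm -v /data/database:/var/lib/mysql migrate-tool up head
cd ../../make/
./prepare
docker-compose up --build -d
```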
@@ -7,13 +7,13 @@ rm -f *.tar.gz
echo "Downloading Swagger UI release package..."
wget https://github.com/swagger-api/swagger-ui/archive/v2.1.4.tar.gz -O swagger.tar.gz
echo "Untarring Swagger UI package to the static file path..."
tar -C ../static/vendors -zxf swagger.tar.gz swagger-ui-2.1.4/dist
tar -C ../src/ui/static/vendors -zxf swagger.tar.gz swagger-ui-2.1.4/dist
echo "Executing some processes..."
sed -i.bak 's/http:\/\/petstore\.swagger\.io\/v2\/swagger\.json/'$SCHEME':\/\/'$SERVER_IP'\/static\/resources\/yaml\/swagger\.yaml/g' \
../static/vendors/swagger-ui-2.1.4/dist/index.html
sed -i.bak '/jsonEditor: false,/a\ validatorUrl: null,' ../static/vendors/swagger-ui-2.1.4/dist/index.html
mkdir -p ../static/resources/yaml
cp swagger.yaml ../static/resources/yaml
sed -i.bak 's/host: localhost/host: '$SERVER_IP'/g' ../static/resources/yaml/swagger.yaml
sed -i.bak 's/ \- http$/ \- '$SCHEME'/g' ../static/resources/yaml/swagger.yaml
../src/ui/static/vendors/swagger-ui-2.1.4/dist/index.html
sed -i.bak '/jsonEditor: false,/a\ validatorUrl: null,' ../src/ui/static/vendors/swagger-ui-2.1.4/dist/index.html
mkdir -p ../src/ui/static/resources/yaml
cp swagger.yaml ../src/ui/static/resources/yaml
sed -i.bak 's/host: localhost/host: '$SERVER_IP'/g' ../src/ui/static/resources/yaml/swagger.yaml
sed -i.bak 's/ \- http$/ \- '$SCHEME'/g' ../src/ui/static/resources/yaml/swagger.yaml
echo "Finish preparation for the Swagger UI."
@@ -1275,6 +1275,22 @@ paths:
          description: Replication's target not found
        500:
          description: Unexpected internal errors.
  /internal/syncregistry:
    post:
      summary: Sync repositories from registry to DB.
      description: |
        This endpoint is for syncing all repositories of registry with database.
      tags:
        - Products
      responses:
        200:
          description: Sync repositories successfully.
        401:
          description: User needs to log in first.
        403:
          description: User does not have permission of admin role.
        500:
          description: Unexpected internal errors.
definitions:
  Search:
    type: object
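A hedged example of calling the new sync endpoint defined above; it assumes the spec's usual /api base path, an admin account and the default admin password mentioned elsewhere in these docs, none of which are part of this hunk:

```sh
# Requires a user with the admin role; responses are 200/401/403/500 as listed above.
curl -u admin:Harbor12345 -X POST "https://reg.yourdomain.com/api/internal/syncregistry"
```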
@@ -67,6 +67,9 @@ You can update or remove a member by clicking the icon on the right.

##Replicating images
If you are a system administrator, you can replicate images to a remote registry, which is called a destination in Harbor. Only a Harbor instance is supported as a destination for now.

**Note:** The replication feature is incompatible between Harbor instances at or before version 0.3.5 and those after version 0.3.5.

Click "Add New Policy" on the "Replication" tab, fill in the necessary fields and click "OK"; a policy for this project will be created. If "Enable" is chosen, the project will be replicated to the remote side immediately, and when a new repository is pushed to this project or an existing repository is deleted from it, the same operation will also be replicated to the destination.

@@ -169,4 +172,4 @@ $ docker-compose start

Option "--dry-run" will print the progress without removing any data.

About the details of GC, please see [GC](https://github.com/docker/distribution/blob/master/docs/garbage-collection.md).
For details about GC, please see [GC](https://github.com/docker/docker.github.io/blob/master/registry/garbage-collection.md).
150
make/checkenv.sh
Executable file
|
@ -0,0 +1,150 @@
|
|||
#/bin/bash
|
||||
|
||||
#docker version: 1.11.2
|
||||
#docker-compose version: 1.7.1
|
||||
#Harbor version: 0.4.5+
|
||||
set +e
|
||||
set -o noglob
|
||||
|
||||
#
|
||||
# Set Colors
|
||||
#
|
||||
|
||||
bold=$(tput bold)
|
||||
underline=$(tput sgr 0 1)
|
||||
reset=$(tput sgr0)
|
||||
|
||||
red=$(tput setaf 1)
|
||||
green=$(tput setaf 76)
|
||||
white=$(tput setaf 7)
|
||||
tan=$(tput setaf 202)
|
||||
blue=$(tput setaf 25)
|
||||
|
||||
#
|
||||
# Headers and Logging
|
||||
#
|
||||
|
||||
underline() { printf "${underline}${bold}%s${reset}\n" "$@"
|
||||
}
|
||||
h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@"
|
||||
}
|
||||
h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@"
|
||||
}
|
||||
debug() { printf "${white}%s${reset}\n" "$@"
|
||||
}
|
||||
info() { printf "${white}➜ %s${reset}\n" "$@"
|
||||
}
|
||||
success() { printf "${green}✔ %s${reset}\n" "$@"
|
||||
}
|
||||
error() { printf "${red}✖ %s${reset}\n" "$@"
|
||||
}
|
||||
warn() { printf "${tan}➜ %s${reset}\n" "$@"
|
||||
}
|
||||
bold() { printf "${bold}%s${reset}\n" "$@"
|
||||
}
|
||||
note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@"
|
||||
}
|
||||
|
||||
set -e
|
||||
|
||||
usage=$'Checking environment for harbor build and install. Include golang, docker and docker-compose.'
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
case $1 in
|
||||
--help)
|
||||
note "$usage"
|
||||
exit 0;;
|
||||
*)
|
||||
note "$usage"
|
||||
exit 1;;
|
||||
esac
|
||||
shift || true
|
||||
done
|
||||
|
||||
function check_golang {
|
||||
if ! go version &> /dev/null
|
||||
then
|
||||
warn "No golang package in your enviroment. You should use golang docker image build binary."
|
||||
return
|
||||
fi
|
||||
|
||||
# docker has been installed and check its version
|
||||
if [[ $(go version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
|
||||
then
|
||||
golang_version=${BASH_REMATCH[1]}
|
||||
golang_version_part1=${BASH_REMATCH[2]}
|
||||
golang_version_part2=${BASH_REMATCH[3]}
|
||||
|
||||
# the version of golang does not meet the requirement
|
||||
if [ "$golang_version_part1" -lt 1 ] || ([ "$golang_version_part1" -eq 1 ] && [ "$golang_version_part2" -lt 6 ])
|
||||
then
|
||||
warn "Better to upgrade golang package to 1.6.0+ or use golang docker image build binary."
|
||||
return
|
||||
else
|
||||
note "golang version: $golang_version"
|
||||
fi
|
||||
else
|
||||
warn "Failed to parse golang version."
|
||||
return
|
||||
fi
|
||||
}
|
||||
|
||||
function check_docker {
|
||||
if ! docker --version &> /dev/null
|
||||
then
|
||||
error "Need to install docker(1.10.0+) first and run this script again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# docker has been installed and check its version
|
||||
if [[ $(docker --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
|
||||
then
|
||||
docker_version=${BASH_REMATCH[1]}
|
||||
docker_version_part1=${BASH_REMATCH[2]}
|
||||
docker_version_part2=${BASH_REMATCH[3]}
|
||||
|
||||
# the version of docker does not meet the requirement
|
||||
if [ "$docker_version_part1" -lt 1 ] || ([ "$docker_version_part1" -eq 1 ] && [ "$docker_version_part2" -lt 10 ])
|
||||
then
|
||||
error "Need to upgrade docker package to 1.10.0+."
|
||||
exit 1
|
||||
else
|
||||
note "docker version: $docker_version"
|
||||
fi
|
||||
else
|
||||
error "Failed to parse docker version."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function check_dockercompose {
|
||||
if ! docker-compose --version &> /dev/null
|
||||
then
|
||||
error "Need to install docker-compose(1.7.1+) by yourself first and run this script again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# docker-compose has been installed, check its version
|
||||
if [[ $(docker-compose --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
|
||||
then
|
||||
docker_compose_version=${BASH_REMATCH[1]}
|
||||
docker_compose_version_part1=${BASH_REMATCH[2]}
|
||||
docker_compose_version_part2=${BASH_REMATCH[3]}
|
||||
|
||||
# the version of docker-compose does not meet the requirement
|
||||
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 6 ])
|
||||
then
|
||||
error "Need to upgrade docker-compose package to 1.7.1+."
|
||||
exit 1
|
||||
else
|
||||
note "docker-compose version: $docker_compose_version"
|
||||
fi
|
||||
else
|
||||
error "Failed to parse docker-compose version."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
check_golang
|
||||
check_docker
|
||||
check_dockercompose
|
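All three checks above parse versions the same way: capture the major and minor parts from the tool's version output with a bash regex and compare them numerically. A minimal sketch of what the regex captures, assuming go is on the PATH:

if [[ $(go version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]; then
    # BASH_REMATCH[1] is the full version, [2] the major part, [3] the minor part
    echo "full=${BASH_REMATCH[1]} major=${BASH_REMATCH[2]} minor=${BASH_REMATCH[3]}"
fi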
181
make/common/db/registry_sqlite.sql
Normal file
|
@ -0,0 +1,181 @@
|
|||
create table access (
|
||||
access_id INTEGER PRIMARY KEY,
|
||||
access_code char(1),
|
||||
comment varchar (30)
|
||||
);
|
||||
|
||||
insert into access (access_code, comment) values
|
||||
('M', 'Management access for project'),
|
||||
('R', 'Read access for project'),
|
||||
('W', 'Write access for project'),
|
||||
('D', 'Delete access for project'),
|
||||
('S', 'Search access for project');
|
||||
|
||||
|
||||
create table role (
|
||||
role_id INTEGER PRIMARY KEY,
|
||||
role_mask int DEFAULT 0 NOT NULL,
|
||||
role_code varchar(20),
|
||||
name varchar (20)
|
||||
);
|
||||
/*
|
||||
role_mask is reserved for a future enhancement where a project member can have multiple roles;
|
||||
currently set to 0
|
||||
*/
|
||||
|
||||
insert into role (role_code, name) values
|
||||
('MDRWS', 'projectAdmin'),
|
||||
('RWS', 'developer'),
|
||||
('RS', 'guest');
|
||||
|
||||
|
||||
create table user (
|
||||
user_id INTEGER PRIMARY KEY,
|
||||
/*
|
||||
The max length of username controlled by API is 20,
|
||||
and 11 characters are reserved for marking deleted users.
|
||||
The mark of deleted user is "#user_id".
|
||||
The 11 characters consist of 10 for the max value of user_id (4294967295)
|
||||
in MySQL plus 1 for the '#'.
|
||||
*/
|
||||
username varchar(32),
|
||||
/*
|
||||
11 bytes are reserved for marking deleted users.
|
||||
*/
|
||||
email varchar(255),
|
||||
password varchar(40) NOT NULL,
|
||||
realname varchar (20) NOT NULL,
|
||||
comment varchar (30),
|
||||
deleted tinyint (1) DEFAULT 0 NOT NULL,
|
||||
reset_uuid varchar(40) DEFAULT NULL,
|
||||
salt varchar(40) DEFAULT NULL,
|
||||
sysadmin_flag tinyint (1),
|
||||
creation_time timestamp,
|
||||
update_time timestamp,
|
||||
UNIQUE (username),
|
||||
UNIQUE (email)
|
||||
);
|
||||
|
||||
insert into user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
|
||||
('admin', 'admin@example.com', '', 'system admin', 'admin user',0, 1, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP),
|
||||
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', 1, 0, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
|
||||
|
||||
create table project (
|
||||
project_id INTEGER PRIMARY KEY,
|
||||
owner_id int NOT NULL,
|
||||
/*
|
||||
The max length of name controlled by API is 30,
|
||||
and 11 is reserved for marking the deleted project.
|
||||
*/
|
||||
name varchar (41) NOT NULL,
|
||||
creation_time timestamp,
|
||||
update_time timestamp,
|
||||
deleted tinyint (1) DEFAULT 0 NOT NULL,
|
||||
public tinyint (1) DEFAULT 0 NOT NULL,
|
||||
FOREIGN KEY (owner_id) REFERENCES user(user_id),
|
||||
UNIQUE (name)
|
||||
);
|
||||
|
||||
insert into project (owner_id, name, creation_time, update_time, public) values
|
||||
(1, 'library', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP, 1);
|
||||
|
||||
create table project_member (
|
||||
project_id int NOT NULL,
|
||||
user_id int NOT NULL,
|
||||
role int NOT NULL,
|
||||
creation_time timestamp,
|
||||
update_time timestamp,
|
||||
PRIMARY KEY (project_id, user_id),
|
||||
FOREIGN KEY (role) REFERENCES role(role_id),
|
||||
FOREIGN KEY (project_id) REFERENCES project(project_id),
|
||||
FOREIGN KEY (user_id) REFERENCES user(user_id)
|
||||
);
|
||||
|
||||
insert into project_member (project_id, user_id, role, creation_time, update_time) values
|
||||
(1, 1, 1, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
|
||||
|
||||
create table access_log (
|
||||
log_id INTEGER PRIMARY KEY,
|
||||
user_id int NOT NULL,
|
||||
project_id int NOT NULL,
|
||||
repo_name varchar (256),
|
||||
repo_tag varchar (128),
|
||||
GUID varchar(64),
|
||||
operation varchar(20) NOT NULL,
|
||||
op_time timestamp,
|
||||
FOREIGN KEY (user_id) REFERENCES user(user_id),
|
||||
FOREIGN KEY (project_id) REFERENCES project (project_id)
|
||||
);
|
||||
|
||||
CREATE INDEX pid_optime ON access_log (project_id, op_time);
|
||||
|
||||
create table repository (
|
||||
repository_id INTEGER PRIMARY KEY,
|
||||
name varchar(255) NOT NULL,
|
||||
project_id int NOT NULL,
|
||||
owner_id int NOT NULL,
|
||||
description text,
|
||||
pull_count int DEFAULT 0 NOT NULL,
|
||||
star_count int DEFAULT 0 NOT NULL,
|
||||
creation_time timestamp default CURRENT_TIMESTAMP,
|
||||
update_time timestamp default CURRENT_TIMESTAMP,
|
||||
FOREIGN KEY (owner_id) REFERENCES user(user_id),
|
||||
FOREIGN KEY (project_id) REFERENCES project(project_id),
|
||||
UNIQUE (name)
|
||||
);
|
||||
|
||||
create table replication_policy (
|
||||
id INTEGER PRIMARY KEY,
|
||||
name varchar(256),
|
||||
project_id int NOT NULL,
|
||||
target_id int NOT NULL,
|
||||
enabled tinyint(1) NOT NULL DEFAULT 1,
|
||||
description text,
|
||||
deleted tinyint (1) DEFAULT 0 NOT NULL,
|
||||
cron_str varchar(256),
|
||||
start_time timestamp NULL,
|
||||
creation_time timestamp default CURRENT_TIMESTAMP,
|
||||
update_time timestamp default CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
create table replication_target (
|
||||
id INTEGER PRIMARY KEY,
|
||||
name varchar(64),
|
||||
url varchar(64),
|
||||
username varchar(40),
|
||||
password varchar(128),
|
||||
/*
|
||||
target_type indicates the type of target registry,
|
||||
0 means it's a harbor instance,
|
||||
1 means it's a regular registry
|
||||
*/
|
||||
target_type tinyint(1) NOT NULL DEFAULT 0,
|
||||
creation_time timestamp default CURRENT_TIMESTAMP,
|
||||
update_time timestamp default CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
create table replication_job (
|
||||
id INTEGER PRIMARY KEY,
|
||||
status varchar(64) NOT NULL,
|
||||
policy_id int NOT NULL,
|
||||
repository varchar(256) NOT NULL,
|
||||
operation varchar(64) NOT NULL,
|
||||
tags varchar(16384),
|
||||
creation_time timestamp default CURRENT_TIMESTAMP,
|
||||
update_time timestamp default CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE INDEX policy ON replication_job (policy_id);
|
||||
CREATE INDEX poid_uptime ON replication_job (policy_id, update_time);
|
||||
|
||||
create table properties (
|
||||
k varchar(64) NOT NULL,
|
||||
v varchar(128) NOT NULL,
|
||||
primary key (k)
|
||||
);
|
||||
|
||||
create table alembic_version (
|
||||
version_num varchar(32) NOT NULL
|
||||
);
|
||||
|
||||
insert into alembic_version values ('0.3.0');
|
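A quick sanity check of the seed data above, assuming the schema has been loaded into /registry.db (the SQLite file used elsewhere in this change): the admin user should come back as projectAdmin of the library project.

sqlite3 /registry.db "select u.username, p.name, r.name
                      from project_member pm
                      join user u on pm.user_id = u.user_id
                      join project p on pm.project_id = p.project_id
                      join role r on pm.role = r.role_id;"
# expected: admin|library|projectAdmin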
26
make/common/log/rotate.sh
Executable file
|
@ -0,0 +1,26 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
echo "Log rotate starting..."
|
||||
|
||||
#Logs older than n days will be compressed.
|
||||
n=14
|
||||
path=/var/log/docker
|
||||
|
||||
list=""
|
||||
n_days_before=$(($(date +%s) - 3600*24*$n))
|
||||
for dir in $(ls $path | grep -v "tar.gz");
|
||||
do
|
||||
if [ $(date --date=$dir +%s) -lt $n_days_before ]
|
||||
then
|
||||
echo "$dir will be compressed"
|
||||
list="$list $dir"
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -n "$list" ]
|
||||
then
|
||||
cd $path
|
||||
tar --remove-files -zcvf $(date -d @$n_days_before +%F)-.tar.gz $list
|
||||
fi
|
||||
|
||||
echo "Log rotate finished."
|
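The script relies on each per-day log directory being named so that `date --date=<dir>` can parse it (for example 2016-07-01); anything older than the 14-day cutoff is tarred up and removed. A small illustration of the comparison, using a hypothetical directory name:

n=14
n_days_before=$(($(date +%s) - 3600*24*$n))
dir=2016-07-01                                   # hypothetical directory under /var/log/docker
[ $(date --date=$dir +%s) -lt $n_days_before ] && echo "$dir would be compressed"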
75
make/common/templates/nginx/nginx.http.conf
Normal file
|
@ -0,0 +1,75 @@
|
|||
worker_processes auto;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
use epoll;
|
||||
multi_accept on;
|
||||
}
|
||||
|
||||
http {
|
||||
tcp_nodelay on;
|
||||
|
||||
# this is necessary for us to be able to disable request buffering in all cases
|
||||
proxy_http_version 1.1;
|
||||
|
||||
|
||||
upstream registry {
|
||||
server registry:5000;
|
||||
}
|
||||
|
||||
upstream ui {
|
||||
server ui:80;
|
||||
}
|
||||
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
|
||||
# disable any limits to avoid HTTP 413 for large image uploads
|
||||
client_max_body_size 0;
|
||||
|
||||
location / {
|
||||
proxy_pass http://ui/;
|
||||
proxy_set_header Host $$host;
|
||||
proxy_set_header X-Real-IP $$remote_addr;
|
||||
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $$scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /v1/ {
|
||||
return 404;
|
||||
}
|
||||
|
||||
location /v2/ {
|
||||
proxy_pass http://registry/v2/;
|
||||
proxy_set_header Host $$http_host;
|
||||
proxy_set_header X-Real-IP $$remote_addr;
|
||||
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $$scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
|
||||
}
|
||||
|
||||
location /service/ {
|
||||
proxy_pass http://ui/service/;
|
||||
proxy_set_header Host $$host;
|
||||
proxy_set_header X-Real-IP $$remote_addr;
|
||||
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $$scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
}
|
||||
}
|
|
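The doubled dollar signs ($$host, $$scheme, ...) suggest this file is a template rendered by the prepare step through a Template-style substitution, where $$ collapses to a single literal $ so that nginx's own variables survive the render. A minimal sketch of that behaviour, assuming Python's string.Template semantics:

python -c 'from string import Template; print(Template("proxy_set_header Host $$host;").substitute())'
# prints: proxy_set_header Host $host;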
@ -24,15 +24,15 @@ http {
|
|||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name harbordomain.com;
|
||||
# server_name harbordomain.com;
|
||||
|
||||
# SSL
|
||||
ssl_certificate /etc/nginx/cert/harbordomain.crt;
|
||||
ssl_certificate_key /etc/nginx/cert/harbordomain.key;
|
||||
ssl_certificate $ssl_cert;
|
||||
ssl_certificate_key $ssl_cert_key;
|
||||
|
||||
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
|
||||
ssl_protocols TLSv1.1 TLSv1.2;
|
||||
ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
|
||||
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_session_cache shared:SSL:10m;
|
||||
|
||||
|
@ -44,12 +44,12 @@ http {
|
|||
|
||||
location / {
|
||||
proxy_pass http://ui/;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Host $$http_host;
|
||||
proxy_set_header X-Real-IP $$remote_addr;
|
||||
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Proto $$scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
|
@ -61,12 +61,12 @@ http {
|
|||
|
||||
location /v2/ {
|
||||
proxy_pass http://registry/v2/;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Host $$http_host;
|
||||
proxy_set_header X-Real-IP $$remote_addr;
|
||||
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Proto $$scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
|
@ -75,12 +75,12 @@ http {
|
|||
|
||||
location /service/ {
|
||||
proxy_pass http://ui/service/;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Host $$http_host;
|
||||
proxy_set_header X-Real-IP $$remote_addr;
|
||||
proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Proto $$scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
|
@ -88,7 +88,7 @@ http {
|
|||
}
|
||||
server {
|
||||
listen 80;
|
||||
server_name harbordomain.com;
|
||||
rewrite ^/(.*) https://$server_name:443/$1 permanent;
|
||||
#server_name harbordomain.com;
|
||||
return 301 https://$$host$$request_uri;
|
||||
}
|
||||
}
|
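With the rendered https config in place, the plain-HTTP listener now answers every request with a 301 to the same host. A quick smoke test, assuming reg.mydomain.com resolves to the proxy and a self-signed certificate is in use (hence -k):

curl -sI  http://reg.mydomain.com/  | head -n 1    # expect: HTTP/1.1 301 Moved Permanently
curl -skI https://reg.mydomain.com/ | head -n 1    # expect: HTTP/1.1 200 OK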
|
@ -1,7 +1,9 @@
|
|||
version: '2'
|
||||
services:
|
||||
log:
|
||||
build: ./log/
|
||||
build:
|
||||
context: ../../
|
||||
dockerfile: make/ubuntu/log/Dockerfile
|
||||
restart: always
|
||||
volumes:
|
||||
- /var/log/harbor/:/var/log/docker/
|
||||
|
@ -12,7 +14,7 @@ services:
|
|||
restart: always
|
||||
volumes:
|
||||
- /data/registry:/storage
|
||||
- ./config/registry/:/etc/registry/
|
||||
- ../common/config/registry/:/etc/registry/
|
||||
environment:
|
||||
- GODEBUG=netdns=cgo
|
||||
command:
|
||||
|
@ -25,12 +27,12 @@ services:
|
|||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "registry"
|
||||
mysql:
|
||||
build: ./db/
|
||||
build: ../common/db/
|
||||
restart: always
|
||||
volumes:
|
||||
- /data/database:/var/lib/mysql
|
||||
env_file:
|
||||
- ./config/db/env
|
||||
- ../common/config/db/env
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
|
@ -40,14 +42,14 @@ services:
|
|||
tag: "mysql"
|
||||
ui:
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: Deploy/ui/Dockerfile
|
||||
context: ../../
|
||||
dockerfile: make/dev/ui/Dockerfile
|
||||
env_file:
|
||||
- ./config/ui/env
|
||||
- ../common/config/ui/env
|
||||
restart: always
|
||||
volumes:
|
||||
- ./config/ui/app.conf:/etc/ui/app.conf
|
||||
- ./config/ui/private_key.pem:/etc/ui/private_key.pem
|
||||
- ../common/config/ui/app.conf:/etc/ui/app.conf
|
||||
- ../common/config/ui/private_key.pem:/etc/ui/private_key.pem
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
|
@ -57,14 +59,14 @@ services:
|
|||
tag: "ui"
|
||||
jobservice:
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: Deploy/jobservice/Dockerfile
|
||||
context: ../../
|
||||
dockerfile: make/dev/jobservice/Dockerfile
|
||||
env_file:
|
||||
- ./config/jobservice/env
|
||||
- ../common/config/jobservice/env
|
||||
restart: always
|
||||
volumes:
|
||||
- /data/job_logs:/var/log/jobs
|
||||
- ./config/jobservice/app.conf:/etc/jobservice/app.conf
|
||||
- ../common/config/jobservice/app.conf:/etc/jobservice/app.conf
|
||||
depends_on:
|
||||
- ui
|
||||
logging:
|
||||
|
@ -76,7 +78,7 @@ services:
|
|||
image: library/nginx:1.9
|
||||
restart: always
|
||||
volumes:
|
||||
- ./config/nginx:/etc/nginx
|
||||
- ../common/config/nginx:/etc/nginx
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
|
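Because the build contexts and config mounts now point at ../../ and ../common/ relative to make/dev, a path mistake only surfaces at build time. Validating the file first is a cheap check; the commands below are a sketch run from the repository root:

docker-compose -f make/dev/docker-compose.yml config > /dev/null && echo "compose file OK"
docker-compose -f make/dev/docker-compose.yml build ui jobservice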
@ -8,7 +8,7 @@ RUN apt-get update \
|
|||
|
||||
COPY . /go/src/github.com/vmware/harbor
|
||||
|
||||
WORKDIR /go/src/github.com/vmware/harbor/jobservice
|
||||
WORKDIR /go/src/github.com/vmware/harbor/src/jobservice
|
||||
|
||||
RUN go build -v -a -o /go/bin/harbor_jobservice \
|
||||
&& chmod u+x /go/bin/harbor_jobservice
|
|
@ -6,8 +6,8 @@ RUN apt-get update \
|
|||
&& apt-get install -y libldap2-dev \
|
||||
&& rm -r /var/lib/apt/lists/*
|
||||
|
||||
COPY . /go/src/github.com/vmware/harbor
|
||||
WORKDIR /go/src/github.com/vmware/harbor/ui
|
||||
COPY src/. /go/src/github.com/vmware/harbor/src
|
||||
WORKDIR /go/src/github.com/vmware/harbor/src/ui
|
||||
|
||||
RUN go build -v -a -o /go/bin/harbor_ui
|
||||
|
||||
|
@ -15,15 +15,15 @@ ENV MYSQL_USR root \
|
|||
MYSQL_PWD root \
|
||||
REGISTRY_URL localhost:5000
|
||||
|
||||
COPY views /go/bin/views
|
||||
COPY static /go/bin/static
|
||||
COPY favicon.ico /go/bin/favicon.ico
|
||||
COPY Deploy/jsminify.sh /tmp/jsminify.sh
|
||||
COPY src/ui/views /go/bin/views
|
||||
COPY src/ui/static /go/bin/static
|
||||
COPY src/favicon.ico /go/bin/favicon.ico
|
||||
COPY make/jsminify.sh /tmp/jsminify.sh
|
||||
|
||||
RUN chmod u+x /go/bin/harbor_ui \
|
||||
&& sed -i 's/TLS_CACERT/#TLS_CAERT/g' /etc/ldap/ldap.conf \
|
||||
&& sed -i '$a\TLS_REQCERT allow' /etc/ldap/ldap.conf \
|
||||
&& /tmp/jsminify.sh /go/bin/views/sections/script-include.htm /go/bin/static/resources/js/harbor.app.min.js
|
||||
&& /tmp/jsminify.sh /go/bin/views/sections/script-include.htm /go/bin/static/resources/js/harbor.app.min.js /go/bin/
|
||||
|
||||
WORKDIR /go/bin/
|
||||
ENTRYPOINT ["/go/bin/harbor_ui"]
|
|
@ -1,7 +1,8 @@
|
|||
version: '2'
|
||||
services:
|
||||
log:
|
||||
image: harbor_log_photon
|
||||
image: vmware/harbor-log
|
||||
container_name: harbor-log
|
||||
restart: always
|
||||
volumes:
|
||||
- /var/log/harbor/:/var/log/docker/
|
||||
|
@ -9,10 +10,11 @@ services:
|
|||
- 1514:514
|
||||
registry:
|
||||
image: library/registry:2.5.0
|
||||
container_name: registry
|
||||
restart: always
|
||||
volumes:
|
||||
- /data/registry:/storage
|
||||
- ./config/registry/:/etc/registry/
|
||||
- ./common/config/registry/:/etc/registry/
|
||||
environment:
|
||||
- GODEBUG=netdns=cgo
|
||||
command:
|
||||
|
@ -25,12 +27,13 @@ services:
|
|||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "registry"
|
||||
mysql:
|
||||
build: ./db/
|
||||
image: vmware/harbor-db
|
||||
container_name: harbor-db
|
||||
restart: always
|
||||
volumes:
|
||||
- /data/database:/var/lib/mysql
|
||||
env_file:
|
||||
- ./config/db/env
|
||||
- ./common/config/db/env
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
|
@ -39,13 +42,14 @@ services:
|
|||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "mysql"
|
||||
ui:
|
||||
image: harbor_ui_photon
|
||||
image: vmware/harbor-ui
|
||||
container_name: harbor-ui
|
||||
env_file:
|
||||
- ./config/ui/env
|
||||
- ./common/config/ui/env
|
||||
restart: always
|
||||
volumes:
|
||||
- ./config/ui/app.conf:/etc/ui/app.conf
|
||||
- ./config/ui/private_key.pem:/etc/ui/private_key.pem
|
||||
- ./common/config/ui/app.conf:/etc/ui/app.conf
|
||||
- ./common/config/ui/private_key.pem:/etc/ui/private_key.pem
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
|
@ -54,13 +58,14 @@ services:
|
|||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "ui"
|
||||
jobservice:
|
||||
image: harbor_jobservice_photon
|
||||
image: vmware/harbor-jobservice
|
||||
container_name: harbor-jobservice
|
||||
env_file:
|
||||
- ./config/jobservice/env
|
||||
- ./common/config/jobservice/env
|
||||
restart: always
|
||||
volumes:
|
||||
- /data/job_logs:/var/log/jobs
|
||||
- ./config/jobservice/app.conf:/etc/jobservice/app.conf
|
||||
- ./common/config/jobservice/app.conf:/etc/jobservice/app.conf
|
||||
depends_on:
|
||||
- ui
|
||||
logging:
|
||||
|
@ -69,10 +74,11 @@ services:
|
|||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "jobservice"
|
||||
proxy:
|
||||
image: library/nginx:1.9.0
|
||||
image: nginx:1.9
|
||||
container_name: nginx
|
||||
restart: always
|
||||
volumes:
|
||||
- ./config/nginx:/etc/nginx
|
||||
- ./common/config/nginx:/etc/nginx
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
|
@ -60,11 +60,6 @@ use_compressed_js = on
|
|||
#Maximum number of job workers in job service
|
||||
max_job_workers = 3
|
||||
|
||||
#Secret key for encryption/decryption of password of remote registry, its length has to be 16 chars
|
||||
#**NOTE** if this changes, previously encrypted password will not be decrypted!
|
||||
#Change this key before any production use.
|
||||
secret_key = secretkey1234567
|
||||
|
||||
#The expiration time (in minute) of token created by token service, default is 30 minutes
|
||||
token_expiration = 30
|
||||
|
||||
|
@ -86,4 +81,9 @@ crt_organization = organization
|
|||
crt_organizationalunit = organizational unit
|
||||
crt_commonname = example.com
|
||||
crt_email = example@example.com
|
||||
#####
|
||||
|
||||
|
||||
#The path of the cert and key files for nginx; they are applied only when the protocol is set to https
|
||||
ssl_cert = /path/to/server.crt
|
||||
ssl_cert_key = /path/to/server.key
|
||||
#############
|
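For a quick https trial a self-signed certificate is enough; the paths below are illustrative and only need to match what harbor.cfg is pointed at.

mkdir -p /data/cert
openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
    -subj "/CN=reg.mydomain.com" \
    -keyout /data/cert/server.key -out /data/cert/server.crt
# then in harbor.cfg:
#   ui_url_protocol = https
#   ssl_cert        = /data/cert/server.crt
#   ssl_cert_key    = /data/cert/server.key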
181
make/install.sh
Executable file
|
@ -0,0 +1,181 @@
|
|||
#!/bin/bash
|
||||
|
||||
#docker version: 1.11.2
|
||||
#docker-compose version: 1.7.1
|
||||
#Harbor version: 0.4.0
|
||||
|
||||
set +e
|
||||
set -o noglob
|
||||
|
||||
#
|
||||
# Set Colors
|
||||
#
|
||||
|
||||
bold=$(tput bold)
|
||||
underline=$(tput sgr 0 1)
|
||||
reset=$(tput sgr0)
|
||||
|
||||
red=$(tput setaf 1)
|
||||
green=$(tput setaf 76)
|
||||
white=$(tput setaf 7)
|
||||
tan=$(tput setaf 202)
|
||||
blue=$(tput setaf 25)
|
||||
|
||||
#
|
||||
# Headers and Logging
|
||||
#
|
||||
|
||||
underline() { printf "${underline}${bold}%s${reset}\n" "$@"
|
||||
}
|
||||
h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@"
|
||||
}
|
||||
h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@"
|
||||
}
|
||||
debug() { printf "${white}%s${reset}\n" "$@"
|
||||
}
|
||||
info() { printf "${white}➜ %s${reset}\n" "$@"
|
||||
}
|
||||
success() { printf "${green}✔ %s${reset}\n" "$@"
|
||||
}
|
||||
error() { printf "${red}✖ %s${reset}\n" "$@"
|
||||
}
|
||||
warn() { printf "${tan}➜ %s${reset}\n" "$@"
|
||||
}
|
||||
bold() { printf "${bold}%s${reset}\n" "$@"
|
||||
}
|
||||
note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@"
|
||||
}
|
||||
|
||||
set -e
|
||||
set +o noglob
|
||||
|
||||
usage=$'Please set hostname and other necessary attributes in harbor.cfg first. DO NOT use localhost or 127.0.0.1 for hostname, because Harbor needs to be accessed by external clients.'
|
||||
item=0
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
case $1 in
|
||||
--help)
|
||||
note "$usage"
|
||||
exit 0;;
|
||||
*)
|
||||
note "$usage"
|
||||
exit 1;;
|
||||
esac
|
||||
shift || true
|
||||
done
|
||||
|
||||
workdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
cd $workdir
|
||||
|
||||
# The hostname in harbor.cfg has not been modified
|
||||
if grep 'hostname = reg.mydomain.com' &> /dev/null harbor.cfg
|
||||
then
|
||||
warn "$usage"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function check_docker {
|
||||
if ! docker --version &> /dev/null
|
||||
then
|
||||
error "Need to install docker(1.10.0+) first and run this script again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# docker has been installed, check its version
|
||||
if [[ $(docker --version) =~ (([0-9]+).([0-9]+).([0-9]+)) ]]
|
||||
then
|
||||
docker_version=${BASH_REMATCH[1]}
|
||||
docker_version_part1=${BASH_REMATCH[2]}
|
||||
docker_version_part2=${BASH_REMATCH[3]}
|
||||
|
||||
# the version of docker does not meet the requirement
|
||||
if [ "$docker_version_part1" -lt 1 ] || ([ "$docker_version_part1" -eq 1 ] && [ "$docker_version_part2" -lt 10 ])
|
||||
then
|
||||
error "Need to upgrade docker package to 1.10.0+."
|
||||
exit 1
|
||||
else
|
||||
note "docker version: $docker_version"
|
||||
fi
|
||||
else
|
||||
error "Failed to parse docker version."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function check_dockercompose {
|
||||
if ! docker-compose --version &> /dev/null
|
||||
then
|
||||
error "Need to install docker-compose(1.7.1+) by yourself first and run this script again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# docker-compose has been installed, check its version
|
||||
if [[ $(docker-compose --version) =~ (([0-9]+).([0-9]+).([0-9]+)) ]]
|
||||
then
|
||||
docker_compose_version=${BASH_REMATCH[1]}
|
||||
docker_compose_version_part1=${BASH_REMATCH[2]}
|
||||
docker_compose_version_part2=${BASH_REMATCH[3]}
|
||||
|
||||
# the version of docker-compose does not meet the requirement
|
||||
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 6 ])
|
||||
then
|
||||
error "Need to upgrade docker-compose package to 1.7.1+."
|
||||
exit 1
|
||||
else
|
||||
note "docker-compose version: $docker_compose_version"
|
||||
fi
|
||||
else
|
||||
error "Failed to parse docker-compose version."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
h2 "[Step $item]: checking installation environment ..."; let item+=1
|
||||
check_docker
|
||||
check_dockercompose
|
||||
|
||||
if [ -f harbor*.tgz ]
|
||||
then
|
||||
h2 "[Step $item]: loading Harbor images ..."; let item+=1
|
||||
docker load -i ./harbor*.tgz
|
||||
fi
|
||||
echo ""
|
||||
|
||||
h2 "[Step $item]: preparing environment ..."; let item+=1
|
||||
if [ -n "$host" ]
|
||||
then
|
||||
sed "s/^hostname = .*/hostname = $host/g" -i ./harbor.cfg
|
||||
fi
|
||||
./prepare
|
||||
echo ""
|
||||
|
||||
h2 "[Step $item]: checking existing instance of Harbor ..."; let item+=1
|
||||
if [ -n "$(docker-compose -f docker-compose*.yml ps -q)" ]
|
||||
then
|
||||
note "stopping existing Harbor instance ..."
|
||||
docker-compose -f docker-compose*.yml down
|
||||
fi
|
||||
echo ""
|
||||
|
||||
h2 "[Step $item]: starting Harbor ..."
|
||||
docker-compose -f docker-compose*.yml up -d
|
||||
|
||||
protocol=http
|
||||
hostname=reg.mydomain.com
|
||||
|
||||
if [[ $(cat ./harbor.cfg) =~ ui_url_protocol[[:blank:]]*=[[:blank:]]*(https?) ]]
|
||||
then
|
||||
protocol=${BASH_REMATCH[1]}
|
||||
fi
|
||||
|
||||
if [[ $(grep 'hostname[[:blank:]]*=' ./harbor.cfg) =~ hostname[[:blank:]]*=[[:blank:]]*(.*) ]]
|
||||
then
|
||||
hostname=${BASH_REMATCH[1]}
|
||||
fi
|
||||
echo ""
|
||||
|
||||
success $"----Harbor has been installed and started successfully.----
|
||||
|
||||
Now you should be able to visit the admin portal at ${protocol}://${hostname}.
|
||||
For more details, please visit https://github.com/vmware/harbor .
|
||||
"
|
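Note that the script honours an optional $host environment variable: if it is set, the hostname line in harbor.cfg is rewritten before ./prepare runs. A sketch of an invocation, assuming harbor.cfg sits next to install.sh as the script expects; the hostname is a placeholder:

sudo env host=reg.example.com ./make/install.sh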
79
make/photon/Makefile
Normal file
|
@ -0,0 +1,79 @@
|
|||
# Makefile for a harbor project
|
||||
#
|
||||
# Targets:
|
||||
#
|
||||
# build: build harbor photon images
|
||||
# clean: clean ui and jobservice harbor images
|
||||
|
||||
# common
|
||||
SHELL := /bin/bash
|
||||
BUILDPATH=$(CURDIR)
|
||||
MAKEPATH=$(BUILDPATH)/make
|
||||
MAKEDEVPATH=$(MAKEPATH)/dev
|
||||
SRCPATH=./src
|
||||
TOOLSPATH=$(BUILDPATH)/tools
|
||||
CHECKENVCMD=checkenv.sh
|
||||
DEVFLAG=true
|
||||
|
||||
# docker parameters
|
||||
DOCKERCMD=$(shell which docker)
|
||||
DOCKERBUILD=$(DOCKERCMD) build
|
||||
DOCKERRMIMAGE=$(DOCKERCMD) rmi
|
||||
DOCKERIMAGES=$(DOCKERCMD) images
|
||||
|
||||
# binary
|
||||
UISOURCECODE=$(SRCPATH)/ui
|
||||
UIBINARYPATH=$(MAKEDEVPATH)/ui
|
||||
UIBINARYNAME=harbor_ui
|
||||
JOBSERVICESOURCECODE=$(SRCPATH)/jobservice
|
||||
JOBSERVICEBINARYPATH=$(MAKEDEVPATH)/jobservice
|
||||
JOBSERVICEBINARYNAME=harbor_jobservice
|
||||
|
||||
# photon dockerfile
|
||||
DOCKERFILEPATH=$(MAKEPATH)/photon
|
||||
DOCKERFILEPATH_UI=$(DOCKERFILEPATH)/ui
|
||||
DOCKERFILENAME_UI=Dockerfile
|
||||
DOCKERIMAGENAME_UI=vmware/harbor-ui
|
||||
DOCKERFILEPATH_JOBSERVICE=$(DOCKERFILEPATH)/jobservice
|
||||
DOCKERFILENAME_JOBSERVICE=Dockerfile
|
||||
DOCKERIMAGENAME_JOBSERVICE=vmware/harbor-jobservice
|
||||
DOCKERFILEPATH_LOG=$(DOCKERFILEPATH)/log
|
||||
DOCKERFILENAME_LOG=Dockerfile
|
||||
DOCKERIMAGENAME_LOG=vmware/harbor-log
|
||||
|
||||
# version prepare
|
||||
VERSIONFILEPATH=$(SRCPATH)/views/sections
|
||||
VERSIONFILENAME=header-content.htm
|
||||
GITCMD=$(shell which git)
|
||||
GITTAG=$(GITCMD) describe --tags
|
||||
ifeq ($(DEVFLAG), true)
|
||||
VERSIONTAG=dev
|
||||
else
|
||||
VERSIONTAG=$(shell $(GITTAG))
|
||||
endif
|
||||
|
||||
check_environment:
|
||||
@$(MAKEPATH)/$(CHECKENVCMD)
|
||||
|
||||
build:
|
||||
@echo "building ui container for photon..."
|
||||
$(DOCKERBUILD) -f $(DOCKERFILEPATH_UI)/$(DOCKERFILENAME_UI) -t $(DOCKERIMAGENAME_UI):$(VERSIONTAG) .
|
||||
@echo "Done."
|
||||
|
||||
@echo "building jobservice container for photon..."
|
||||
$(DOCKERBUILD) -f $(DOCKERFILEPATH_JOBSERVICE)/$(DOCKERFILENAME_JOBSERVICE) -t $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) .
|
||||
@echo "Done."
|
||||
|
||||
@echo "building log container for photon..."
|
||||
$(DOCKERBUILD) -f $(DOCKERFILEPATH_LOG)/$(DOCKERFILENAME_LOG) -t $(DOCKERIMAGENAME_LOG):$(VERSIONTAG) .
|
||||
@echo "Done."
|
||||
|
||||
cleanimage:
|
||||
@echo "cleaning image for photon..."
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_UI):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_LOG):$(VERSIONTAG)
|
||||
|
||||
.PHONY: clean
|
||||
clean: cleanimage
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
FROM library/photon:latest
|
||||
|
||||
RUN mkdir /harbor/
|
||||
COPY ./Deploy/jobservice/harbor_jobservice /harbor/
|
||||
COPY ./make/dev/jobservice/harbor_jobservice /harbor/
|
||||
|
||||
RUN chmod u+x /harbor/harbor_jobservice
|
||||
WORKDIR /harbor/
|
|
@ -1,23 +1,20 @@
|
|||
FROM library/photon:latest
|
||||
|
||||
# run logrotate hourly, disable imklog model, provides TCP/UDP syslog reception
|
||||
RUN tdnf install -y cronie rsyslog logrotate shadow\
|
||||
&& mv /etc/cron.daily/logrotate /etc/cron.hourly/ \
|
||||
RUN tdnf install -y cronie rsyslog shadow tar gzip \
|
||||
&& mkdir /etc/rsyslog.d/ \
|
||||
&& mkdir /var/spool/rsyslog \
|
||||
&& groupadd syslog \
|
||||
&& useradd -g syslog syslog
|
||||
|
||||
ADD rsyslog.conf /etc/rsyslog.conf
|
||||
ADD make/common/log/rsyslog.conf /etc/rsyslog.conf
|
||||
|
||||
COPY logrotate.conf.photon /etc/logrotate.conf
|
||||
|
||||
# logrotate configuration file for docker
|
||||
ADD logrotate_docker.conf /etc/logrotate.d/
|
||||
# rotate logs weekly
|
||||
# note: the file name must not contain a dot, or the script will not be run
|
||||
ADD make/common/log/rotate.sh /etc/cron.weekly/rotate
|
||||
|
||||
# rsyslog configuration file for docker
|
||||
ADD rsyslog_docker.conf /etc/rsyslog.d/
|
||||
|
||||
ADD make/common/log/rsyslog_docker.conf /etc/rsyslog.d/
|
||||
|
||||
VOLUME /var/log/docker/
|
||||
|
|
@ -3,12 +3,12 @@ FROM library/photon:latest
|
|||
RUN mkdir /harbor/
|
||||
RUN tdnf install -y sed apr-util-ldap
|
||||
|
||||
COPY ./Deploy/ui/harbor_ui /harbor/
|
||||
COPY ./make/dev/ui/harbor_ui /harbor/
|
||||
|
||||
COPY ./views /harbor/views
|
||||
COPY ./static /harbor/static
|
||||
COPY ./favicon.ico /harbor/favicon.ico
|
||||
COPY ./Deploy/jsminify.sh /tmp/jsminify.sh
|
||||
COPY ./src/ui/views /harbor/views
|
||||
COPY ./src/ui/static /harbor/static
|
||||
COPY ./src/favicon.ico /harbor/favicon.ico
|
||||
COPY ./make/jsminify.sh /tmp/jsminify.sh
|
||||
|
||||
RUN chmod u+x /harbor/harbor_ui \
|
||||
&& tmp/jsminify.sh /harbor/views/sections/script-include.htm /harbor/static/resources/js/harbor.app.min.js /harbor/ \
|
|
@ -8,6 +8,7 @@ import os
|
|||
import sys
|
||||
import argparse
|
||||
import subprocess
|
||||
import shutil
|
||||
from io import open
|
||||
|
||||
if sys.version_info[:3][0] == 2:
|
||||
|
@ -19,11 +20,44 @@ if sys.version_info[:3][0] == 3:
|
|||
import io as StringIO
|
||||
|
||||
def validate(conf):
|
||||
if len(conf.get("configuration", "secret_key")) != 16:
|
||||
raise Exception("Error: The length of secret key has to be 16 characters!")
|
||||
protocol = rcp.get("configuration", "ui_url_protocol")
|
||||
if protocol == "https":
|
||||
if not rcp.has_option("configuration", "ssl_cert"):
|
||||
raise Exception("Error: The protocol is https but attribute ssl_cert is not set")
|
||||
cert_path = rcp.get("configuration", "ssl_cert")
|
||||
if not os.path.isfile(cert_path):
|
||||
raise Exception("Error: The path for certificate: %s is invalid" % cert_path)
|
||||
if not rcp.has_option("configuration", "ssl_cert_key"):
|
||||
raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")
|
||||
cert_key_path = rcp.get("configuration", "ssl_cert_key")
|
||||
if not os.path.isfile(cert_key_path):
|
||||
raise Exception("Error: The path for certificate key: %s is invalid" % cert_key_path)
|
||||
|
||||
def get_secret_key(path):
|
||||
key_file = os.path.join(path, "secretkey")
|
||||
if os.path.isfile(key_file):
|
||||
with open(key_file, 'r') as f:
|
||||
key = f.read()
|
||||
print("loaded secret key")
|
||||
if len(key) != 16:
|
||||
raise Exception("secret key's length has to be 16 chars, current length: %d" % len(key))
|
||||
return key
|
||||
if not os.path.isdir(path):
|
||||
os.makedirs(path, mode=0o600)
|
||||
key = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
|
||||
with open(key_file, 'w') as f:
|
||||
f.write(key)
|
||||
print("generated and saved secret key")
|
||||
return key
|
||||
|
||||
base_dir = os.path.dirname(__file__)
|
||||
config_dir = os.path.join(base_dir, "common/config")
|
||||
templates_dir = os.path.join(base_dir, "common/templates")
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('-conf', dest='cfgfile', default = 'harbor.cfg',type=str,help="the path of Harbor configuration file")
|
||||
parser.add_argument('-conf', dest='cfgfile', default=base_dir+'/harbor.cfg',type=str,help="the path of Harbor configuration file")
|
||||
parser.add_argument('--data-volume', dest='data_volume', default='/data/',type=str,help="the path of Harbor data volume, which is set in template of docker-compose.")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
#Read configurations
|
||||
|
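get_secret_key only generates a key when <data-volume>/secretkey is missing, so an existing key can be pre-seeded and will survive re-runs of prepare. A minimal sketch, assuming the default /data volume; the key must be exactly 16 characters:

mkdir -p /data
tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 16 > /data/secretkey
wc -c /data/secretkey    # must print 16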
@ -37,7 +71,8 @@ rcp.readfp(conf)
|
|||
validate(rcp)
|
||||
|
||||
hostname = rcp.get("configuration", "hostname")
|
||||
ui_url = rcp.get("configuration", "ui_url_protocol") + "://" + hostname
|
||||
protocol = rcp.get("configuration", "ui_url_protocol")
|
||||
ui_url = protocol + "://" + hostname
|
||||
email_server = rcp.get("configuration", "email_server")
|
||||
email_server_port = rcp.get("configuration", "email_server_port")
|
||||
email_username = rcp.get("configuration", "email_username")
|
||||
|
@ -65,6 +100,9 @@ ldap_scope = rcp.get("configuration", "ldap_scope")
|
|||
db_password = rcp.get("configuration", "db_password")
|
||||
self_registration = rcp.get("configuration", "self_registration")
|
||||
use_compressed_js = rcp.get("configuration", "use_compressed_js")
|
||||
if protocol == "https":
|
||||
cert_path = rcp.get("configuration", "ssl_cert")
|
||||
cert_key_path = rcp.get("configuration", "ssl_cert_key")
|
||||
customize_crt = rcp.get("configuration", "customize_crt")
|
||||
crt_country = rcp.get("configuration", "crt_country")
|
||||
crt_state = rcp.get("configuration", "crt_state")
|
||||
|
@ -76,15 +114,12 @@ crt_email = rcp.get("configuration", "crt_email")
|
|||
max_job_workers = rcp.get("configuration", "max_job_workers")
|
||||
token_expiration = rcp.get("configuration", "token_expiration")
|
||||
verify_remote_cert = rcp.get("configuration", "verify_remote_cert")
|
||||
secret_key = rcp.get("configuration", "secret_key")
|
||||
#secret_key = rcp.get("configuration", "secret_key")
|
||||
secret_key = get_secret_key(args.data_volume)
|
||||
########
|
||||
|
||||
ui_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
|
||||
|
||||
base_dir = os.path.dirname(__file__)
|
||||
config_dir = os.path.join(base_dir, "config")
|
||||
templates_dir = os.path.join(base_dir, "templates")
|
||||
|
||||
ui_config_dir = os.path.join(config_dir,"ui")
|
||||
if not os.path.exists(ui_config_dir):
|
||||
os.makedirs(os.path.join(config_dir, "ui"))
|
||||
|
@ -108,15 +143,31 @@ ui_conf = os.path.join(config_dir, "ui", "app.conf")
|
|||
registry_conf = os.path.join(config_dir, "registry", "config.yml")
|
||||
db_conf_env = os.path.join(config_dir, "db", "env")
|
||||
job_conf_env = os.path.join(config_dir, "jobservice", "env")
|
||||
|
||||
conf_files = [ ui_conf, ui_conf_env, registry_conf, db_conf_env, job_conf_env ]
|
||||
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
|
||||
cert_dir = os.path.join(config_dir, "nginx", "cert")
|
||||
conf_files = [ ui_conf, ui_conf_env, registry_conf, db_conf_env, job_conf_env, nginx_conf, cert_dir ]
|
||||
def rmdir(cf):
|
||||
for f in cf:
|
||||
if os.path.exists(f):
|
||||
print("Clearing the configuration file: %s" % f)
|
||||
os.remove(f)
|
||||
if os.path.isdir(f):
|
||||
rmdir(map(lambda x: os.path.join(f,x), os.listdir(f)))
|
||||
elif os.path.exists(f) and os.path.basename(f) != ".gitignore":
|
||||
print("Clearing the configuration file: %s" % f)
|
||||
os.remove(f)
|
||||
rmdir(conf_files)
|
||||
|
||||
if protocol == "https":
|
||||
target_cert_path = os.path.join(cert_dir, os.path.basename(cert_path))
|
||||
shutil.copy2(cert_path,target_cert_path)
|
||||
target_cert_key_path = os.path.join(cert_dir, os.path.basename(cert_key_path))
|
||||
shutil.copy2(cert_key_path,target_cert_key_path)
|
||||
render(os.path.join(templates_dir, "nginx", "nginx.https.conf"),
|
||||
nginx_conf,
|
||||
ssl_cert = os.path.join("/etc/nginx/cert", os.path.basename(target_cert_path)),
|
||||
ssl_cert_key = os.path.join("/etc/nginx/cert", os.path.basename(target_cert_key_path)))
|
||||
else:
|
||||
render(os.path.join(templates_dir, "nginx", "nginx.http.conf"),
|
||||
nginx_conf)
|
||||
|
||||
render(os.path.join(templates_dir, "ui", "env"),
|
||||
ui_conf_env,
|
||||
hostname=hostname,
|
139
make/pushimage.sh
Executable file
|
@ -0,0 +1,139 @@
|
|||
#!/bin/bash
|
||||
|
||||
set +e
|
||||
set -o noglob
|
||||
|
||||
echo "This shell will push specific image to registry server."
|
||||
echo "Usage: #./pushimage [imgae tag] [registry username] [registry password] [registry server]"
|
||||
|
||||
#
|
||||
# Set Colors
|
||||
#
|
||||
|
||||
bold=$(tput bold)
|
||||
underline=$(tput sgr 0 1)
|
||||
reset=$(tput sgr0)
|
||||
|
||||
red=$(tput setaf 1)
|
||||
green=$(tput setaf 76)
|
||||
white=$(tput setaf 7)
|
||||
tan=$(tput setaf 202)
|
||||
blue=$(tput setaf 25)
|
||||
|
||||
#
|
||||
# Headers and Logging
|
||||
#
|
||||
|
||||
underline() { printf "${underline}${bold}%s${reset}\n" "$@"
|
||||
}
|
||||
h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@"
|
||||
}
|
||||
h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@"
|
||||
}
|
||||
debug() { printf "${white}%s${reset}\n" "$@"
|
||||
}
|
||||
info() { printf "${white}➜ %s${reset}\n" "$@"
|
||||
}
|
||||
success() { printf "${green}✔ %s${reset}\n" "$@"
|
||||
}
|
||||
error() { printf "${red}✖ %s${reset}\n" "$@"
|
||||
}
|
||||
warn() { printf "${tan}➜ %s${reset}\n" "$@"
|
||||
}
|
||||
bold() { printf "${bold}%s${reset}\n" "$@"
|
||||
}
|
||||
note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@"
|
||||
}
|
||||
|
||||
|
||||
type_exists() {
|
||||
if [ $(type -P $1) ]; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Check variables
|
||||
if [ -z $1 ]; then
|
||||
error "Please set the 'image' variable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z $2 ]; then
|
||||
error "Please set the 'username' variable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z $3 ]; then
|
||||
error "Please set the 'password' variable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z $4 ]; then
|
||||
info "Using default registry server (dockerhub)."
|
||||
fi
|
||||
|
||||
|
||||
# Check Docker is installed
|
||||
if ! type_exists 'docker'; then
|
||||
error "Docker is not installed."
|
||||
info "Please install docker package."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Variables
|
||||
IMAGE="$1"
|
||||
USERNAME="$2"
|
||||
PASSWORD="$3"
|
||||
REGISTRY="$4"
|
||||
|
||||
set -e
|
||||
|
||||
# ----- Pushing image(s) -----
|
||||
# see documentation :
|
||||
# - https://docs.docker.com/reference/commandline/cli/#login
|
||||
# - https://docs.docker.com/reference/commandline/cli/#push
|
||||
# - https://docs.docker.com/reference/commandline/cli/#logout
|
||||
# ---------------------------
|
||||
|
||||
# Login to the registry
|
||||
h2 "Login to the Docker registry"
|
||||
|
||||
DOCKER_LOGIN="docker login --username $USERNAME --password $PASSWORD $4"
|
||||
info "docker login --username $USERNAME --password *******"
|
||||
DOCKER_LOGIN_OUTPUT=$($DOCKER_LOGIN)
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
warn "$DOCKER_LOGIN_OUTPUT"
|
||||
error "Login to Docker registry $REGISTRY failed"
|
||||
exit 1
|
||||
else
|
||||
success "Login to Docker registry $REGISTRY succeeded";
|
||||
fi
|
||||
|
||||
# Push the docker image
|
||||
h2 "Pushing image to Docker registry"
|
||||
|
||||
DOCKER_PUSH="docker push $IMAGE"
|
||||
info "$DOCKER_PUSH"
|
||||
DOCKER_PUSH_OUTPUT=$($DOCKER_PUSH)
|
||||
|
||||
if [ $? -ne 0 ];then
|
||||
warn $DOCKER_PUSH_OUTPUT
|
||||
error "Pushing image $IMAGE failed";
|
||||
else
|
||||
success "Pushing image $IMAGE succeeded";
|
||||
fi
|
||||
|
||||
# Logout from the registry
|
||||
h2 "Logout from the docker registry"
|
||||
DOCKER_LOGOUT="docker logout $REGISTRY"
|
||||
DOCKER_LOGOUT_OUTPUT=$($DOCKER_LOGOUT)
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
warn "$DOCKER_LOGOUT_OUTPUT"
|
||||
error "Logout from Docker registry $REGISTRY failed"
|
||||
exit 1
|
||||
else
|
||||
success "Logout from Docker registry $REGISTRY succeeded"
|
||||
fi
|
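An example invocation; the image tag, credentials and registry below are placeholders:

./make/pushimage.sh vmware/harbor-ui:dev myuser mypassword registry.example.com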
79
make/ubuntu/Makefile
Normal file
|
@ -0,0 +1,79 @@
|
|||
# Makefile for a harbor project
|
||||
#
|
||||
# Targets:
|
||||
#
|
||||
# build: build harbor ubuntu images
|
||||
# clean: clean ui and jobservice harbor images
|
||||
|
||||
# common
|
||||
SHELL := /bin/bash
|
||||
BUILDPATH=$(CURDIR)
|
||||
MAKEPATH=$(BUILDPATH)/make
|
||||
MAKEDEVPATH=$(MAKEPATH)/dev
|
||||
SRCPATH=./src
|
||||
TOOLSPATH=$(BUILDPATH)/tools
|
||||
CHECKENVCMD=checkenv.sh
|
||||
DEVFLAG=true
|
||||
|
||||
# docker parameters
|
||||
DOCKERCMD=$(shell which docker)
|
||||
DOCKERBUILD=$(DOCKERCMD) build
|
||||
DOCKERRMIMAGE=$(DOCKERCMD) rmi
|
||||
DOCKERIMAGES=$(DOCKERCMD) images
|
||||
|
||||
# binary
|
||||
UISOURCECODE=$(SRCPATH)/ui
|
||||
UIBINARYPATH=$(MAKEDEVPATH)/ui
|
||||
UIBINARYNAME=harbor_ui
|
||||
JOBSERVICESOURCECODE=$(SRCPATH)/jobservice
|
||||
JOBSERVICEBINARYPATH=$(MAKEDEVPATH)/jobservice
|
||||
JOBSERVICEBINARYNAME=harbor_jobservice
|
||||
|
||||
# ubuntu dockerfile
|
||||
DOCKERFILEPATH=$(MAKEPATH)/ubuntu
|
||||
DOCKERFILEPATH_UI=$(DOCKERFILEPATH)/ui
|
||||
DOCKERFILENAME_UI=Dockerfile
|
||||
DOCKERIMAGENAME_UI=vmware/harbor-ui
|
||||
DOCKERFILEPATH_JOBSERVICE=$(DOCKERFILEPATH)/jobservice
|
||||
DOCKERFILENAME_JOBSERVICE=Dockerfile
|
||||
DOCKERIMAGENAME_JOBSERVICE=vmware/harbor-jobservice
|
||||
DOCKERFILEPATH_LOG=$(DOCKERFILEPATH)/log
|
||||
DOCKERFILENAME_LOG=Dockerfile
|
||||
DOCKERIMAGENAME_LOG=vmware/harbor-log
|
||||
|
||||
# version prepare
|
||||
VERSIONFILEPATH=$(SRCPATH)/views/sections
|
||||
VERSIONFILENAME=header-content.htm
|
||||
GITCMD=$(shell which git)
|
||||
GITTAG=$(GITCMD) describe --tags
|
||||
ifeq ($(DEVFLAG), true)
|
||||
VERSIONTAG=dev
|
||||
else
|
||||
VERSIONTAG=$(shell $(GITTAG))
|
||||
endif
|
||||
|
||||
check_environment:
|
||||
@$(MAKEPATH)/$(CHECKENVCMD)
|
||||
|
||||
build:
|
||||
@echo "building ui container for ubuntu..."
|
||||
$(DOCKERBUILD) -f $(DOCKERFILEPATH_UI)/$(DOCKERFILENAME_UI) -t $(DOCKERIMAGENAME_UI):$(VERSIONTAG) .
|
||||
@echo "Done."
|
||||
|
||||
@echo "building jobservice container for ubuntu..."
|
||||
$(DOCKERBUILD) -f $(DOCKERFILEPATH_JOBSERVICE)/$(DOCKERFILENAME_JOBSERVICE) -t $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) .
|
||||
@echo "Done."
|
||||
|
||||
@echo "building log container for ubuntu..."
|
||||
$(DOCKERBUILD) -f $(DOCKERFILEPATH_LOG)/$(DOCKERFILENAME_LOG) -t $(DOCKERIMAGENAME_LOG):$(VERSIONTAG) .
|
||||
@echo "Done."
|
||||
|
||||
cleanimage:
|
||||
@echo "cleaning image for ubuntu..."
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_UI):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_LOG):$(VERSIONTAG)
|
||||
|
||||
.PHONY: clean
|
||||
clean: cleanimage
|
||||
|
14
make/ubuntu/jobservice/Dockerfile
Normal file
|
@ -0,0 +1,14 @@
|
|||
FROM golang:1.6.2
|
||||
|
||||
MAINTAINER jiangd@vmware.com
|
||||
|
||||
RUN apt-get update && apt-get install -y libldap2-dev \
|
||||
&& rm -r /var/lib/apt/lists/*
|
||||
|
||||
RUN mkdir /harbor/
|
||||
COPY ./make/dev/jobservice/harbor_jobservice /harbor/
|
||||
|
||||
RUN chmod u+x /harbor/harbor_jobservice
|
||||
|
||||
WORKDIR /harbor/
|
||||
ENTRYPOINT ["/harbor/harbor_jobservice"]
|
18
make/ubuntu/log/Dockerfile
Normal file
|
@ -0,0 +1,18 @@
|
|||
FROM library/ubuntu:14.04
|
||||
|
||||
RUN rm /etc/rsyslog.d/* && rm /etc/rsyslog.conf
|
||||
|
||||
ADD make/common/log/rsyslog.conf /etc/rsyslog.conf
|
||||
|
||||
# rotate logs weekly
|
||||
# note: the file name must not contain a dot, or the script will not be run
|
||||
ADD make/common/log/rotate.sh /etc/cron.weekly/rotate
|
||||
|
||||
# rsyslog configuration file for docker
|
||||
ADD make/common/log/rsyslog_docker.conf /etc/rsyslog.d/
|
||||
|
||||
VOLUME /var/log/docker/
|
||||
|
||||
EXPOSE 514
|
||||
|
||||
CMD cron && rsyslogd -n
|
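The ADD paths above are relative to the build context, so the image has to be built from the repository root, which matches what the ubuntu Makefile's build target runs when invoked from there. A standalone sketch:

docker build -f make/ubuntu/log/Dockerfile -t vmware/harbor-log:dev .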
29
make/ubuntu/ui/Dockerfile
Normal file
|
@ -0,0 +1,29 @@
|
|||
FROM golang:1.6.2
|
||||
|
||||
MAINTAINER jiangd@vmware.com
|
||||
|
||||
RUN apt-get update && apt-get install -y libldap2-dev \
|
||||
&& rm -r /var/lib/apt/lists/*
|
||||
|
||||
ENV MYSQL_USR root \
|
||||
MYSQL_PWD root \
|
||||
REGISTRY_URL localhost:5000
|
||||
|
||||
RUN mkdir /harbor/
|
||||
COPY ./make/dev/ui/harbor_ui /harbor/
|
||||
|
||||
COPY ./src/ui/views /harbor/views
|
||||
COPY ./src/ui/static /harbor/static
|
||||
COPY ./src/favicon.ico /harbor/favicon.ico
|
||||
COPY ./make/jsminify.sh /tmp/jsminify.sh
|
||||
|
||||
RUN chmod u+x /harbor/harbor_ui \
|
||||
&& sed -i 's/TLS_CACERT/#TLS_CAERT/g' /etc/ldap/ldap.conf \
|
||||
&& sed -i '$a\TLS_REQCERT allow' /etc/ldap/ldap.conf \
|
||||
&& /tmp/jsminify.sh /harbor/views/sections/script-include.htm /harbor/static/resources/js/harbor.app.min.js /harbor/
|
||||
|
||||
WORKDIR /harbor/
|
||||
ENTRYPOINT ["/harbor/harbor_ui"]
|
||||
|
||||
EXPOSE 80
|
||||
|
|
@ -23,10 +23,10 @@ import (
|
|||
"strconv"
|
||||
|
||||
"github.com/astaxie/beego/validation"
|
||||
"github.com/vmware/harbor/auth"
|
||||
"github.com/vmware/harbor/dao"
|
||||
"github.com/vmware/harbor/models"
|
||||
"github.com/vmware/harbor/utils/log"
|
||||
"github.com/vmware/harbor/src/ui/auth"
|
||||
"github.com/vmware/harbor/src/common/dao"
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
|
||||
"github.com/astaxie/beego"
|
||||
)
|
||||
|
@ -156,13 +156,13 @@ func (b *BaseAPI) GetIDFromURL() int64 {
|
|||
return id
|
||||
}
|
||||
|
||||
// set "Link" and "X-Total-Count" header for pagination request
|
||||
func (b *BaseAPI) setPaginationHeader(total, page, pageSize int64) {
|
||||
// SetPaginationHeader set"Link" and "X-Total-Count" header for pagination request
|
||||
func (b *BaseAPI) SetPaginationHeader(total, page, pageSize int64) {
|
||||
b.Ctx.ResponseWriter.Header().Set("X-Total-Count", strconv.FormatInt(total, 10))
|
||||
|
||||
link := ""
|
||||
|
||||
// set previous link
|
||||
// set the previous link
|
||||
if page > 1 && (page-1)*pageSize <= total {
|
||||
u := *(b.Ctx.Request.URL)
|
||||
q := u.Query()
|
||||
|
@ -174,7 +174,7 @@ func (b *BaseAPI) setPaginationHeader(total, page, pageSize int64) {
|
|||
link += fmt.Sprintf("<%s>; rel=\"prev\"", u.String())
|
||||
}
|
||||
|
||||
// set next link
|
||||
// set the next link
|
||||
if pageSize*page < total {
|
||||
u := *(b.Ctx.Request.URL)
|
||||
q := u.Query()
|
||||
|
@ -191,7 +191,8 @@ func (b *BaseAPI) setPaginationHeader(total, page, pageSize int64) {
|
|||
}
|
||||
}
|
||||
|
||||
func (b *BaseAPI) getPaginationParams() (page, pageSize int64) {
|
||||
// GetPaginationParams ...
|
||||
func (b *BaseAPI) GetPaginationParams() (page, pageSize int64) {
|
||||
page, err := b.GetInt64("page", 1)
|
||||
if err != nil || page <= 0 {
|
||||
b.CustomAbort(http.StatusBadRequest, "invalid page")
|
||||
|
@ -210,7 +211,8 @@ func (b *BaseAPI) getPaginationParams() (page, pageSize int64) {
|
|||
return page, pageSize
|
||||
}
|
||||
|
||||
func getIsInsecure() bool {
|
||||
// GetIsInsecure ...
|
||||
func GetIsInsecure() bool {
|
||||
insecure := false
|
||||
|
||||
verifyRemoteCert := os.Getenv("VERIFY_REMOTE_CERT")
|
|
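Exporting SetPaginationHeader and GetPaginationParams lets other API packages reuse them; on the wire the result is a Link header plus X-Total-Count. A hypothetical check against an endpoint that uses them (the endpoint, host and default admin credentials here are assumptions):

curl -is -u admin:Harbor12345 "http://reg.mydomain.com/api/projects?page=2&page_size=10" \
    | grep -iE '^(Link|X-Total-Count):'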
@ -17,9 +17,10 @@ package dao
|
|||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/harbor/models"
|
||||
"github.com/vmware/harbor/utils/log"
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
// AddAccessLog persists the access logs
|
||||
|
@ -27,13 +28,14 @@ func AddAccessLog(accessLog models.AccessLog) error {
|
|||
o := GetOrmer()
|
||||
p, err := o.Raw(`insert into access_log
|
||||
(user_id, project_id, repo_name, repo_tag, guid, operation, op_time)
|
||||
values (?, ?, ?, ?, ?, ?, now())`).Prepare()
|
||||
values (?, ?, ?, ?, ?, ?, ?)`).Prepare()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer p.Close()
|
||||
|
||||
_, err = p.Exec(accessLog.UserID, accessLog.ProjectID, accessLog.RepoName, accessLog.RepoTag, accessLog.GUID, accessLog.Operation)
|
||||
_, err = p.Exec(accessLog.UserID, accessLog.ProjectID, accessLog.RepoName, accessLog.RepoTag,
|
||||
accessLog.GUID, accessLog.Operation, time.Now())
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -145,8 +147,8 @@ func AccessLog(username, projectName, repoName, repoTag, action string) error {
|
|||
o := GetOrmer()
|
||||
sql := "insert into access_log (user_id, project_id, repo_name, repo_tag, operation, op_time) " +
|
||||
"select (select user_id as user_id from user where username=?), " +
|
||||
"(select project_id as project_id from project where name=?), ?, ?, ?, now() "
|
||||
_, err := o.Raw(sql, username, projectName, repoName, repoTag, action).Exec()
|
||||
"(select project_id as project_id from project where name=?), ?, ?, ?, ? "
|
||||
_, err := o.Raw(sql, username, projectName, repoName, repoTag, action, time.Now()).Exec()
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("error in AccessLog: %v ", err)
|
104
src/common/dao/base.go
Normal file
|
@ -0,0 +1,104 @@
|
|||
/*
|
||||
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dao
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/astaxie/beego/orm"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
// NonExistUserID : if a user does not exist, the ID of the user will be 0.
|
||||
const NonExistUserID = 0
|
||||
|
||||
// Database is an interface of different databases
|
||||
type Database interface {
|
||||
// Name returns the name of database
|
||||
Name() string
|
||||
// String returns the details of database
|
||||
String() string
|
||||
// Register registers the database which will be used
|
||||
Register(alias ...string) error
|
||||
}
|
||||
|
||||
// InitDatabase initializes the database
|
||||
func InitDatabase() {
|
||||
database, err := getDatabase()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
log.Infof("initializing database: %s", database.String())
|
||||
if err := database.Register(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func getDatabase() (db Database, err error) {
|
||||
switch strings.ToLower(os.Getenv("DATABASE")) {
|
||||
case "", "mysql":
|
||||
host, port, usr, pwd, database := getMySQLConnInfo()
|
||||
db = NewMySQL(host, port, usr, pwd, database)
|
||||
case "sqlite":
|
||||
file := getSQLiteConnInfo()
|
||||
db = NewSQLite(file)
|
||||
default:
|
||||
err = fmt.Errorf("invalid database: %s", os.Getenv("DATABASE"))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// TODO read from config
|
||||
func getMySQLConnInfo() (host, port, username, password, database string) {
|
||||
host = os.Getenv("MYSQL_HOST")
|
||||
port = os.Getenv("MYSQL_PORT")
|
||||
username = os.Getenv("MYSQL_USR")
|
||||
password = os.Getenv("MYSQL_PWD")
|
||||
database = os.Getenv("MYSQL_DATABASE")
|
||||
if len(database) == 0 {
|
||||
database = "registry"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// TODO read from config
|
||||
func getSQLiteConnInfo() string {
|
||||
file := os.Getenv("SQLITE_FILE")
|
||||
if len(file) == 0 {
|
||||
file = "registry.db"
|
||||
}
|
||||
return file
|
||||
}
|
||||
|
||||
var globalOrm orm.Ormer
|
||||
var once sync.Once
|
||||
|
||||
// GetOrmer :set ormer singleton
|
||||
func GetOrmer() orm.Ormer {
|
||||
once.Do(func() {
|
||||
globalOrm = orm.NewOrm()
|
||||
})
|
||||
return globalOrm
|
||||
}
|
||||
|
||||
func paginateForRawSQL(sql string, limit, offset int64) string {
|
||||
return fmt.Sprintf("%s limit %d offset %d", sql, limit, offset)
|
||||
}
|
|
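The backend is chosen purely from environment variables, with MySQL as the default when DATABASE is empty. A sketch of both configurations using the variables read above:

# SQLite
export DATABASE=sqlite SQLITE_FILE=/registry.db
# MySQL (also the default when DATABASE is unset)
export DATABASE=mysql MYSQL_HOST=127.0.0.1 MYSQL_PORT=3306 \
       MYSQL_USR=root MYSQL_PWD=root MYSQL_DATABASE=registry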
@ -16,14 +16,14 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/astaxie/beego/orm"
|
||||
"github.com/vmware/harbor/models"
|
||||
"github.com/vmware/harbor/utils/log"
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
func execUpdate(o orm.Ormer, sql string, params ...interface{}) error {
|
||||
|
@ -45,41 +45,49 @@ func clearUp(username string) {
|
|||
o := orm.NewOrm()
|
||||
o.Begin()
|
||||
|
||||
err = execUpdate(o, `delete pm
|
||||
from project_member pm
|
||||
join user u
|
||||
on pm.user_id = u.user_id
|
||||
where u.username = ?`, username)
|
||||
err = execUpdate(o, `delete
|
||||
from project_member
|
||||
where user_id = (
|
||||
select user_id
|
||||
from user
|
||||
where username = ?
|
||||
) `, username)
|
||||
if err != nil {
|
||||
o.Rollback()
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
err = execUpdate(o, `delete pm
|
||||
from project_member pm
|
||||
join project p
|
||||
on pm.project_id = p.project_id
|
||||
where p.name = ?`, projectName)
|
||||
err = execUpdate(o, `delete
|
||||
from project_member
|
||||
where project_id = (
|
||||
select project_id
|
||||
from project
|
||||
where name = ?
|
||||
)`, projectName)
|
||||
if err != nil {
|
||||
o.Rollback()
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
err = execUpdate(o, `delete al
|
||||
from access_log al
|
||||
join user u
|
||||
on al.user_id = u.user_id
|
||||
where u.username = ?`, username)
|
||||
err = execUpdate(o, `delete
|
||||
from access_log
|
||||
where user_id = (
|
||||
select user_id
|
||||
from user
|
||||
where username = ?
|
||||
)`, username)
|
||||
if err != nil {
|
||||
o.Rollback()
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
err = execUpdate(o, `delete al
|
||||
from access_log al
|
||||
join project p
|
||||
on al.project_id = p.project_id
|
||||
where p.name = ?`, projectName)
|
||||
err = execUpdate(o, `delete
|
||||
from access_log
|
||||
where project_id = (
|
||||
select project_id
|
||||
from project
|
||||
where name = ?
|
||||
)`, projectName)
|
||||
if err != nil {
|
||||
o.Rollback()
|
||||
log.Error(err)
|
||||
|
@ -127,6 +135,31 @@ const publicityOn = 1
const publicityOff = 0

func TestMain(m *testing.M) {
    databases := []string{"mysql", "sqlite"}
    for _, database := range databases {
        log.Infof("run test cases for database: %s", database)

        result := 1
        switch database {
        case "mysql":
            result = testForMySQL(m)
        case "sqlite":
            result = testForSQLite(m)
        default:
            log.Fatalf("invalid database: %s", database)
        }

        if result != 0 {
            os.Exit(result)
        }
    }
}

func testForMySQL(m *testing.M) int {
    db := os.Getenv("DATABASE")
    defer os.Setenv("DATABASE", db)

    os.Setenv("DATABASE", "mysql")

    dbHost := os.Getenv("DB_HOST")
    if len(dbHost) == 0 {

@ -148,11 +181,51 @@ func TestMain(m *testing.M) {
    os.Setenv("MYSQL_PORT", dbPort)
    os.Setenv("MYSQL_USR", dbUser)
    os.Setenv("MYSQL_PWD", dbPassword)
    os.Setenv("AUTH_MODE", "db_auth")
    InitDB()
    clearUp(username)
    os.Exit(m.Run())

    return testForAll(m)
}

func testForSQLite(m *testing.M) int {
    db := os.Getenv("DATABASE")
    defer os.Setenv("DATABASE", db)

    os.Setenv("DATABASE", "sqlite")

    file := os.Getenv("SQLITE_FILE")
    if len(file) == 0 {
        os.Setenv("SQLITE_FILE", "/registry.db")
        defer os.Setenv("SQLITE_FILE", "")
    }

    return testForAll(m)
}

func testForAll(m *testing.M) int {
    os.Setenv("AUTH_MODE", "db_auth")
    initDatabaseForTest()
    clearUp(username)

    return m.Run()
}

var defaultRegistered = false

func initDatabaseForTest() {
    database, err := getDatabase()
    if err != nil {
        panic(err)
    }

    log.Infof("initializing database: %s", database.String())

    alias := database.Name()
    if !defaultRegistered {
        defaultRegistered = true
        alias = "default"
    }
    if err := database.Register(alias); err != nil {
        panic(err)
    }
}

func TestRegister(t *testing.T) {
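The test bootstrap above relies on a Database abstraction and a getDatabase() selector that are not shown in this diff. A rough sketch of what they presumably look like, inferred only from the methods called here and from the NewMySQL/NewSQLite constructors added below; the interface definition, the environment-variable names, and the "registry" database name are assumptions, not the PR's actual code:

package dao

import (
    "fmt"
    "os"
)

// Database mirrors the methods used by initDatabaseForTest above
// (Register, Name, String); the real definition is elsewhere in the tree.
type Database interface {
    Name() string
    String() string
    Register(alias ...string) error
}

// getDatabase picks a backend from the DATABASE environment variable,
// matching the values TestMain sets ("mysql" or "sqlite").
func getDatabase() (Database, error) {
    switch os.Getenv("DATABASE") {
    case "", "mysql":
        return NewMySQL(
            os.Getenv("MYSQL_HOST"),
            os.Getenv("MYSQL_PORT"),
            os.Getenv("MYSQL_USR"),
            os.Getenv("MYSQL_PWD"),
            "registry"), nil
    case "sqlite":
        return NewSQLite(os.Getenv("SQLITE_FILE")), nil
    default:
        return nil, fmt.Errorf("invalid database: %s", os.Getenv("DATABASE"))
    }
}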
@ -332,12 +405,9 @@ func TestListUsers(t *testing.T) {
}

func TestResetUserPassword(t *testing.T) {
    uuid, err := GenerateRandomString()
    if err != nil {
        t.Errorf("Error occurred in GenerateRandomString: %v", err)
    }
    uuid := utils.GenerateRandomString()

    err = UpdateUserResetUUID(models.User{ResetUUID: uuid, Email: currentUser.Email})
    err := UpdateUserResetUUID(models.User{ResetUUID: uuid, Email: currentUser.Email})
    if err != nil {
        t.Errorf("Error occurred in UpdateUserResetUuid: %v", err)
    }
@ -358,7 +428,13 @@ func TestResetUserPassword(t *testing.T) {
}

func TestChangeUserPassword(t *testing.T) {
    err := ChangeUserPassword(models.User{UserID: currentUser.UserID, Password: "NewHarborTester12345", Salt: currentUser.Salt})
    user := models.User{UserID: currentUser.UserID}
    query, err := GetUser(user)
    if err != nil {
        t.Errorf("Error occurred when getting user salt")
    }
    currentUser.Salt = query.Salt
    err = ChangeUserPassword(models.User{UserID: currentUser.UserID, Password: "NewHarborTester12345", Salt: currentUser.Salt})
    if err != nil {
        t.Errorf("Error occurred in ChangeUserPassword: %v", err)
    }
@ -374,7 +450,14 @@ func TestChangeUserPassword(t *testing.T) {
}

func TestChangeUserPasswordWithOldPassword(t *testing.T) {
    err := ChangeUserPassword(models.User{UserID: currentUser.UserID, Password: "NewerHarborTester12345", Salt: currentUser.Salt}, "NewHarborTester12345")
    user := models.User{UserID: currentUser.UserID}
    query, err := GetUser(user)
    if err != nil {
        t.Errorf("Error occurred when getting user salt")
    }
    currentUser.Salt = query.Salt

    err = ChangeUserPassword(models.User{UserID: currentUser.UserID, Password: "NewerHarborTester12345", Salt: currentUser.Salt}, "NewHarborTester12345")
    if err != nil {
        t.Errorf("Error occurred in ChangeUserPassword: %v", err)
    }
@ -1494,39 +1577,6 @@ func TestGetOrmer(t *testing.T) {
    }
}

func TestDeleteProject(t *testing.T) {
    name := "project_for_test"
    project := models.Project{
        OwnerID: currentUser.UserID,
        Name:    name,
    }

    id, err := AddProject(project)
    if err != nil {
        t.Fatalf("failed to add project: %v", err)
    }

    if err = DeleteProject(id); err != nil {
        t.Fatalf("failed to delete project: %v", err)
    }

    p := &models.Project{}
    if err = GetOrmer().Raw(`select * from project where project_id = ?`, id).
        QueryRow(p); err != nil {
        t.Fatalf("failed to get project: %v", err)
    }

    if p.Deleted != 1 {
        t.Errorf("unexpeced deleted column: %d != %d", p.Deleted, 1)
    }

    deletedName := fmt.Sprintf("%s#%d", name, id)
    if p.Name != deletedName {
        t.Errorf("unexpected name: %s != %s", p.Name, deletedName)
    }

}

func TestAddRepository(t *testing.T) {
    repoRecord := models.RepoRecord{
        Name: currentProject.Name + "/" + repositoryName,
101
src/common/dao/mysql.go
Normal file

@ -0,0 +1,101 @@
/*
   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package dao

import (
    "errors"
    "fmt"
    "net"

    "time"

    "github.com/astaxie/beego/orm"
    _ "github.com/go-sql-driver/mysql" //register mysql driver
    "github.com/vmware/harbor/src/common/utils/log"
)

type mysql struct {
    host     string
    port     string
    usr      string
    pwd      string
    database string
}

// NewMySQL returns an instance of mysql
func NewMySQL(host, port, usr, pwd, database string) Database {
    return &mysql{
        host:     host,
        port:     port,
        usr:      usr,
        pwd:      pwd,
        database: database,
    }
}

// Register registers MySQL as the underlying database used
func (m *mysql) Register(alias ...string) error {
    if err := m.testConn(m.host, m.port); err != nil {
        return err
    }

    if err := orm.RegisterDriver("mysql", orm.DRMySQL); err != nil {
        return err
    }

    an := "default"
    if len(alias) != 0 {
        an = alias[0]
    }
    conn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", m.usr,
        m.pwd, m.host, m.port, m.database)
    return orm.RegisterDataBase(an, "mysql", conn)
}

func (m *mysql) testConn(host, port string) error {
    ch := make(chan int, 1)
    go func() {
        var err error
        var c net.Conn
        for {
            c, err = net.DialTimeout("tcp", host+":"+port, 20*time.Second)
            if err == nil {
                c.Close()
                ch <- 1
            } else {
                log.Errorf("failed to connect to db, retry after 2 seconds :%v", err)
                time.Sleep(2 * time.Second)
            }
        }
    }()
    select {
    case <-ch:
        return nil
    case <-time.After(60 * time.Second):
        return errors.New("failed to connect to database after 60 seconds")
    }
}

// Name returns the name of MySQL
func (m *mysql) Name() string {
    return "MySQL"
}

// String returns the details of database
func (m *mysql) String() string {
    return fmt.Sprintf("type-%s host-%s port-%s user-%s database-%s",
        m.Name(), m.host, m.port, m.usr, m.database)
}
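For reference, a minimal sketch of how a caller might use the new constructor: Register first probes the host via testConn, then registers the connection under beego orm's "default" alias when no alias is passed. The connection values, the "registry" database name, and the import path are placeholders/assumptions, not taken from this diff:

package main

import (
    "github.com/astaxie/beego/orm"
    "github.com/vmware/harbor/src/common/dao"
)

func main() {
    // Placeholder settings; in Harbor these come from env vars such as
    // MYSQL_HOST, MYSQL_PORT, MYSQL_USR and MYSQL_PWD.
    db := dao.NewMySQL("127.0.0.1", "3306", "root", "root123", "registry")

    // Blocks until the TCP probe succeeds (or 60s elapse), then registers
    // the connection under the "default" alias used by orm.NewOrm().
    if err := db.Register(); err != nil {
        panic(err)
    }

    o := orm.NewOrm() // uses the "default" alias registered above
    _ = o
}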
@ -16,12 +16,12 @@
package dao

import (
    "github.com/vmware/harbor/models"
    "github.com/vmware/harbor/src/common/models"

    "fmt"
    "time"

    "github.com/vmware/harbor/utils/log"
    "github.com/vmware/harbor/src/common/utils/log"
)

//TODO:transaction, return err

@ -279,9 +279,16 @@ func getProjects(userID int, name string, args ...int64) ([]models.Project, erro

// DeleteProject ...
func DeleteProject(id int64) error {
    project, err := GetProjectByID(id)
    if err != nil {
        return err
    }

    name := fmt.Sprintf("%s#%d", project.Name, project.ProjectID)

    sql := `update project
        set deleted = 1, name = concat(name,"#",project_id)
        set deleted = 1, name = ?
        where project_id = ?`
    _, err := GetOrmer().Raw(sql, id).Exec()
    _, err = GetOrmer().Raw(sql, name, id).Exec()
    return err
}
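The DeleteProject change above builds the "<name>#<id>" value in Go instead of using SQL concat(), which SQLite does not provide (it uses the || operator), so a single UPDATE works for both backends. A small illustrative helper capturing the same pattern; it is not part of the patch, which inlines this via fmt.Sprintf:

package dao

import (
    "database/sql"
    "fmt"
)

// softDeleteProject marks a project as deleted and renames it to
// "<name>#<id>" so the original name can be reused later. Building the new
// name in Go keeps the UPDATE free of MySQL-only string functions.
func softDeleteProject(db *sql.DB, id int64, name string) error {
    newName := fmt.Sprintf("%s#%d", name, id)
    _, err := db.Exec(`update project
        set deleted = 1, name = ?
        where project_id = ?`, newName, id)
    return err
}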
81
src/common/dao/project_test.go
Normal file

@ -0,0 +1,81 @@
/*
   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package dao

import (
    "fmt"
    "testing"

    "github.com/vmware/harbor/src/common/models"
)

func TestDeleteProject(t *testing.T) {
    name := "project_for_test"
    project := models.Project{
        OwnerID: currentUser.UserID,
        Name:    name,
    }

    id, err := AddProject(project)
    if err != nil {
        t.Fatalf("failed to add project: %v", err)
    }
    defer func() {
        if err := delProjPermanent(id); err != nil {
            t.Errorf("failed to clear up project %d: %v", id, err)
        }
    }()

    if err = DeleteProject(id); err != nil {
        t.Fatalf("failed to delete project: %v", err)
    }

    p := &models.Project{}
    if err = GetOrmer().Raw(`select * from project where project_id = ?`, id).
        QueryRow(p); err != nil {
        t.Fatalf("failed to get project: %v", err)
    }

    if p.Deleted != 1 {
        t.Errorf("unexpected deleted column: %d != %d", p.Deleted, 1)
    }

    deletedName := fmt.Sprintf("%s#%d", name, id)
    if p.Name != deletedName {
        t.Errorf("unexpected name: %s != %s", p.Name, deletedName)
    }

}

func delProjPermanent(id int64) error {
    _, err := GetOrmer().QueryTable("access_log").
        Filter("ProjectID", id).
        Delete()
    if err != nil {
        return err
    }

    _, err = GetOrmer().Raw(`delete from project_member
        where project_id = ?`, id).Exec()
    if err != nil {
        return err
    }

    _, err = GetOrmer().QueryTable("project").
        Filter("ProjectID", id).
        Delete()
    return err
}
@ -16,7 +16,7 @@
package dao

import (
    "github.com/vmware/harbor/models"
    "github.com/vmware/harbor/src/common/models"
)

// AddProjectMember inserts a record to table project_member
@ -19,8 +19,8 @@ import (
    "errors"
    "time"

    "github.com/vmware/harbor/models"
    "github.com/vmware/harbor/utils"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils"
)

// Register is used for user to register, the password is encrypted before the record is inserted into database.

@ -32,10 +32,7 @@ func Register(user models.User) (int64, error) {
    }
    defer p.Close()

    salt, err := GenerateRandomString()
    if err != nil {
        return 0, err
    }
    salt := utils.GenerateRandomString()

    now := time.Now()
    r, err := p.Exec(user.Username, utils.Encrypt(user.Password, salt), user.Realname, user.Email, user.Comment, salt, user.HasAdminRole, now, now)
@ -22,7 +22,7 @@ import (
    "strings"

    "github.com/astaxie/beego/orm"
    "github.com/vmware/harbor/models"
    "github.com/vmware/harbor/src/common/models"
)

// AddRepTarget ...

@ -76,7 +76,8 @@ func DeleteRepTarget(id int64) error {
// UpdateRepTarget ...
func UpdateRepTarget(target models.RepTarget) error {
    o := GetOrmer()
    _, err := o.Update(&target, "URL", "Name", "Username", "Password")
    target.UpdateTime = time.Now()
    _, err := o.Update(&target, "URL", "Name", "Username", "Password", "UpdateTime")
    return err
}
@ -105,18 +106,23 @@ func FilterRepTargets(name string) ([]*models.RepTarget, error) {
// AddRepPolicy ...
func AddRepPolicy(policy models.RepPolicy) (int64, error) {
    o := GetOrmer()
    sqlTpl := `insert into replication_policy (name, project_id, target_id, enabled, description, cron_str, start_time, creation_time, update_time ) values (?, ?, ?, ?, ?, ?, %s, NOW(), NOW())`
    var sql string
    if policy.Enabled == 1 {
        sql = fmt.Sprintf(sqlTpl, "NOW()")
    } else {
        sql = fmt.Sprintf(sqlTpl, "NULL")
    }
    sql := `insert into replication_policy (name, project_id, target_id, enabled, description, cron_str, start_time, creation_time, update_time ) values (?, ?, ?, ?, ?, ?, ?, ?, ?)`
    p, err := o.Raw(sql).Prepare()
    if err != nil {
        return 0, err
    }
    r, err := p.Exec(policy.Name, policy.ProjectID, policy.TargetID, policy.Enabled, policy.Description, policy.CronStr)

    params := []interface{}{}
    params = append(params, policy.Name, policy.ProjectID, policy.TargetID, policy.Enabled, policy.Description, policy.CronStr)
    now := time.Now()
    if policy.Enabled == 1 {
        params = append(params, now)
    } else {
        params = append(params, nil)
    }
    params = append(params, now, now)

    r, err := p.Exec(params...)
    if err != nil {
        return 0, err
    }
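The AddRepPolicy rewrite above stops splicing NOW()/NULL into the SQL string and instead binds every value: time.Now() is passed for the timestamps and nil where start_time should be NULL, which keeps the statement portable since SQLite has no NOW(). A condensed sketch of the pattern with a reduced column list (illustrative helper, not the PR's exact code):

package dao

import (
    "database/sql"
    "time"
)

// insertPolicy demonstrates parameter binding for timestamps and NULLs:
// start_time is sent as nil (NULL) while the policy is disabled, and all
// timestamps are computed in Go rather than by the database.
func insertPolicy(db *sql.DB, name string, enabled int) error {
    now := time.Now()

    var startTime interface{} // stays nil -> NULL when disabled
    if enabled == 1 {
        startTime = now
    }

    _, err := db.Exec(`insert into replication_policy
        (name, enabled, start_time, creation_time, update_time)
        values (?, ?, ?, ?, ?)`,
        name, enabled, startTime, now, now)
    return err
}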
@ -241,7 +247,8 @@ func GetRepPolicyByProjectAndTarget(projectID, targetID int64) ([]*models.RepPol
// UpdateRepPolicy ...
func UpdateRepPolicy(policy *models.RepPolicy) error {
    o := GetOrmer()
    _, err := o.Update(policy, "TargetID", "Name", "Enabled", "Description", "CronStr")
    policy.UpdateTime = time.Now()
    _, err := o.Update(policy, "TargetID", "Name", "Enabled", "Description", "CronStr", "UpdateTime")
    return err
}
@ -249,8 +256,9 @@ func UpdateRepPolicy(policy *models.RepPolicy) error {
func DeleteRepPolicy(id int64) error {
    o := GetOrmer()
    policy := &models.RepPolicy{
        ID:      id,
        Deleted: 1,
        ID:         id,
        Deleted:    1,
        UpdateTime: time.Now(),
    }
    _, err := o.Update(policy, "Deleted")
    return err
@ -260,8 +268,9 @@ func DeleteRepPolicy(id int64) error {
func UpdateRepPolicyEnablement(id int64, enabled int) error {
    o := GetOrmer()
    p := models.RepPolicy{
        ID:      id,
        Enabled: enabled,
        ID:         id,
        Enabled:    enabled,
        UpdateTime: time.Now(),
    }

    var err error
@ -386,10 +395,11 @@ func DeleteRepJob(id int64) error {
func UpdateRepJobStatus(id int64, status string) error {
    o := GetOrmer()
    j := models.RepJob{
        ID:     id,
        Status: status,
        ID:         id,
        Status:     status,
        UpdateTime: time.Now(),
    }
    num, err := o.Update(&j, "Status")
    num, err := o.Update(&j, "Status", "UpdateTime")
    if num == 0 {
        err = fmt.Errorf("Failed to update replication job with id: %d %s", id, err.Error())
    }
@ -399,8 +409,8 @@ func UpdateRepJobStatus(id int64, status string) error {
// ResetRunningJobs updates the status of all running jobs to pending
func ResetRunningJobs() error {
    o := GetOrmer()
    sql := fmt.Sprintf("update replication_job set status = '%s' where status = '%s'", models.JobPending, models.JobRunning)
    _, err := o.Raw(sql).Exec()
    sql := fmt.Sprintf("update replication_job set status = '%s', update_time = ? where status = '%s'", models.JobPending, models.JobRunning)
    _, err := o.Raw(sql, time.Now()).Exec()
    return err
}
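A pattern repeated throughout these replication hunks: update_time is now set explicitly in Go and listed in the columns passed to orm.Update, rather than leaving the column to the database. A minimal sketch of that pattern with a stand-in model (RepJobLike and its table are illustrative only, not a real Harbor type):

package dao

import (
    "time"

    "github.com/astaxie/beego/orm"
)

// RepJobLike is a stand-in model used only to illustrate the pattern.
type RepJobLike struct {
    ID         int64     `orm:"pk;column(id)"`
    Status     string    `orm:"column(status)"`
    UpdateTime time.Time `orm:"column(update_time)"`
}

func init() {
    // beego orm requires models to be registered before Update is called.
    orm.RegisterModel(new(RepJobLike))
}

// touchAndUpdate sets UpdateTime on the model in Go, then names it in the
// column list so the timestamp is written together with the real change.
func touchAndUpdate(o orm.Ormer, id int64, status string) error {
    j := RepJobLike{
        ID:         id,
        Status:     status,
        UpdateTime: time.Now(),
    }
    _, err := o.Update(&j, "Status", "UpdateTime")
    return err
}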
@ -17,9 +17,10 @@ package dao

import (
    "fmt"
    "time"

    "github.com/astaxie/beego/orm"
    "github.com/vmware/harbor/models"
    "github.com/vmware/harbor/src/common/models"
)

// AddRepository adds a repo to the database.
@ -27,9 +28,10 @@ func AddRepository(repo models.RepoRecord) error {
    o := GetOrmer()
    sql := "insert into repository (owner_id, project_id, name, description, pull_count, star_count, creation_time, update_time) " +
        "select (select user_id as owner_id from user where username=?), " +
        "(select project_id as project_id from project where name=?), ?, ?, ?, ?, NOW(), NULL "
        "(select project_id as project_id from project where name=?), ?, ?, ?, ?, ?, NULL "

    _, err := o.Raw(sql, repo.OwnerName, repo.ProjectName, repo.Name, repo.Description, repo.PullCount, repo.StarCount).Exec()
    _, err := o.Raw(sql, repo.OwnerName, repo.ProjectName, repo.Name, repo.Description,
        repo.PullCount, repo.StarCount, time.Now()).Exec()
    return err
}
@ -62,6 +64,7 @@ func DeleteRepository(name string) error {
// UpdateRepository ...
func UpdateRepository(repo models.RepoRecord) error {
    o := GetOrmer()
    repo.UpdateTime = time.Now()
    _, err := o.Update(&repo)
    return err
}
@ -71,7 +74,8 @@ func IncreasePullCount(name string) (err error) {
    o := GetOrmer()
    num, err := o.QueryTable("repository").Filter("name", name).Update(
        orm.Params{
            "pull_count": orm.ColValue(orm.ColAdd, 1),
            "pull_count":  orm.ColValue(orm.ColAdd, 1),
            "update_time": time.Now(),
        })
    if num == 0 {
        err = fmt.Errorf("Failed to increase repository pull count with name: %s %s", name, err.Error())
@ -18,7 +18,7 @@ package dao
import (
    "testing"

    "github.com/vmware/harbor/models"
    "github.com/vmware/harbor/src/common/models"
)

var (
@ -19,7 +19,7 @@ import (
    "fmt"

    "github.com/astaxie/beego/orm"
    "github.com/vmware/harbor/models"
    "github.com/vmware/harbor/src/common/models"
)

// GetUserProjectRoles returns roles that the user has according to the project.
61
src/common/dao/sqlite.go
Normal file

@ -0,0 +1,61 @@
/*
   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package dao

import (
    "fmt"

    "github.com/astaxie/beego/orm"
    _ "github.com/mattn/go-sqlite3" //register sqlite driver
)

type sqlite struct {
    file string
}

// NewSQLite returns an instance of sqlite
func NewSQLite(file string) Database {
    return &sqlite{
        file: file,
    }
}

// Register registers SQLite as the underlying database used
func (s *sqlite) Register(alias ...string) error {
    if err := orm.RegisterDriver("sqlite3", orm.DRSqlite); err != nil {
        return err
    }

    an := "default"
    if len(alias) != 0 {
        an = alias[0]
    }
    if err := orm.RegisterDataBase(an, "sqlite3", s.file); err != nil {
        return err
    }

    return nil
}

// Name returns the name of SQLite
func (s *sqlite) Name() string {
    return "SQLite"
}

// String returns the details of database
func (s *sqlite) String() string {
    return fmt.Sprintf("type-%s file:%s", s.Name(), s.file)
}
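A short usage sketch for the new SQLite backend, e.g. in a test setup. The /registry.db path matches the file the CI seeds from registry_sqlite.sql; calling Register with no argument uses beego orm's "default" alias, and an explicit alias is only needed once "default" is already taken, as initDatabaseForTest does on its second pass. The import path is assumed:

package main

import (
    "github.com/astaxie/beego/orm"
    "github.com/vmware/harbor/src/common/dao"
)

func main() {
    // Any SQLite file created from the registry_sqlite.sql schema would work.
    db := dao.NewSQLite("/registry.db")

    // Registers the sqlite3 driver and the file under the "default" alias.
    if err := db.Register(); err != nil {
        panic(err)
    }

    o := orm.NewOrm() // backed by the SQLite file registered above
    _ = o
}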
@ -18,11 +18,12 @@ package dao
import (
    "database/sql"
    "errors"
    "fmt"

    "github.com/vmware/harbor/models"
    "github.com/vmware/harbor/utils"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils"

    "github.com/vmware/harbor/utils/log"
    "github.com/vmware/harbor/src/common/utils/log"
)

// GetUser ...
@ -136,11 +137,12 @@ func ChangeUserPassword(u models.User, oldPassword ...string) (err error) {
    o := GetOrmer()

    var r sql.Result
    salt := utils.GenerateRandomString()
    if len(oldPassword) == 0 {
        // In some cases there is no need to check the old password, similar to Linux password-change policies.
        r, err = o.Raw(`update user set password=?, salt=? where user_id=?`, utils.Encrypt(u.Password, u.Salt), u.Salt, u.UserID).Exec()
        r, err = o.Raw(`update user set password=?, salt=? where user_id=?`, utils.Encrypt(u.Password, salt), salt, u.UserID).Exec()
    } else {
        r, err = o.Raw(`update user set password=?, salt=? where user_id=? and password = ?`, utils.Encrypt(u.Password, u.Salt), u.Salt, u.UserID, utils.Encrypt(oldPassword[0], u.Salt)).Exec()
        r, err = o.Raw(`update user set password=?, salt=? where user_id=? and password = ?`, utils.Encrypt(u.Password, salt), salt, u.UserID, utils.Encrypt(oldPassword[0], u.Salt)).Exec()
    }

    if err != nil {
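The ChangeUserPassword change above rotates the salt: the old password is verified against the salt currently stored for the user, while the new password is hashed with a freshly generated salt written in the same UPDATE. A compact sketch of that flow, using the utils helpers this diff switches to; the helper name and signature are illustrative:

package dao

import (
    "database/sql"

    "github.com/vmware/harbor/src/common/utils"
)

// rotatePassword checks the old password with the user's stored salt and
// stores the new password hashed with a brand-new salt in one statement.
func rotatePassword(db *sql.DB, userID int, oldSalt, oldPwd, newPwd string) error {
    newSalt := utils.GenerateRandomString()
    _, err := db.Exec(`update user set password=?, salt=? where user_id=? and password = ?`,
        utils.Encrypt(newPwd, newSalt), newSalt, userID, utils.Encrypt(oldPwd, oldSalt))
    return err
}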
@ -214,10 +216,20 @@ func CheckUserPassword(query models.User) (*models.User, error) {
// DeleteUser ...
func DeleteUser(userID int) error {
    o := GetOrmer()
    _, err := o.Raw(`update user
        set deleted = 1, username = concat(username, "#", user_id),
        email = concat(email, "#", user_id)
        where user_id = ?`, userID).Exec()

    user, err := GetUser(models.User{
        UserID: userID,
    })
    if err != nil {
        return err
    }

    name := fmt.Sprintf("%s#%d", user.Username, user.UserID)
    email := fmt.Sprintf("%s#%d", user.Email, user.UserID)

    _, err = o.Raw(`update user
        set deleted = 1, username = ?, email = ?
        where user_id = ?`, name, email, userID).Exec()
    return err
}