resolve conflict
commit 582deea9e7
@@ -3,12 +3,12 @@ package job
 const (
 	//ImageScanJob is name of scan job it will be used as key to register to job service.
 	ImageScanJob = "IMAGE_SCAN"
-	// ImageReplicationTransfer : the name of replication transfer job in job service
-	ImageReplicationTransfer = "IMAGE_REPLICATION_TRANSFER"
-	// ImageReplicationDelete : the name of replication delete job in job service
-	ImageReplicationDelete = "IMAGE_REPLICATION_DELETE"
-	// ImagePeriodReplication : the name of period replication job in job service
-	ImagePeriodReplication = "IMAGE_PERIOD_REPLICATION"
 	// GenericKind marks the job as a generic job, it will be contained in job metadata and passed to job service.
 	GenericKind = "Generic"
+	// ImageTransfer : the name of image transfer job in job service
+	ImageTransfer = "IMAGE_TRANSFER"
+	// ImageDelete : the name of image delete job in job service
+	ImageDelete = "IMAGE_DELETE"
+	// ImageReplicate : the name of image replicate job in job service
+	ImageReplicate = "IMAGE_REPLICATE"
 )
@@ -9,9 +9,9 @@ import (
 	"net/http"
 	"time"

-	"github.com/vmware/harbor/src/common/utils/log"
 	"github.com/vmware/harbor/src/jobservice_v2/config"
 	"github.com/vmware/harbor/src/jobservice_v2/env"
+	"github.com/vmware/harbor/src/jobservice_v2/logger"
 )

 //Server serves the http requests.
@@ -91,7 +91,7 @@ func (s *Server) Start() {
 	var err error
 	defer func() {
 		s.context.WG.Done()
-		log.Infof("API server is gracefully shutdown")
+		logger.Infof("API server is gracefully shutdown")
 	}()

 	if s.config.Protocol == config.JobServiceProtocolHTTPS {
@@ -110,13 +110,13 @@ func (s *Server) Start() {
 func (s *Server) Stop() {
 	go func() {
 		defer func() {
-			log.Info("Stop API server done!")
+			logger.Info("Stop API server done!")
 		}()
 		shutDownCtx, cancel := context.WithTimeout(s.context.SystemContext, 10*time.Second)
 		defer cancel()

 		if err := s.httpServer.Shutdown(shutDownCtx); err != nil {
-			log.Errorf("Shutdown API server failed with error: %s\n", err)
+			logger.Errorf("Shutdown API server failed with error: %s\n", err)
 		}
 	}()
 }
@@ -5,7 +5,6 @@ import (
 	"strings"

 	"github.com/vmware/harbor/src/common/utils/log"
-	"github.com/vmware/harbor/src/jobservice_v2/logger"
 )

 //JobLogger is an implementation of logger.Interface.
@@ -17,7 +16,7 @@ type JobLogger struct {

 //New logger
 //nil might be returned
-func New(logPath string, level string) logger.Interface {
+func New(logPath string, level string) *JobLogger {
 	f, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
 		return nil
src/jobservice_v2/job/impl/logger/service_logger.go (new file, 74 lines)

package logger

import (
	"os"

	"github.com/vmware/harbor/src/common/utils/log"
)

//ServiceLogger is an implementation of logger.Interface.
//It used to log info in workerpool components.
type ServiceLogger struct {
	backendLogger *log.Logger
}

//NewServiceLogger to create new logger for job service
//nil might be returned
func NewServiceLogger(level string) *ServiceLogger {
	logLevel := parseLevel(level)
	backendLogger := log.New(os.Stdout, log.NewTextFormatter(), logLevel)

	return &ServiceLogger{
		backendLogger: backendLogger,
	}
}

//Debug ...
func (sl *ServiceLogger) Debug(v ...interface{}) {
	sl.backendLogger.Debug(v...)
}

//Debugf with format
func (sl *ServiceLogger) Debugf(format string, v ...interface{}) {
	sl.backendLogger.Debugf(format, v...)
}

//Info ...
func (sl *ServiceLogger) Info(v ...interface{}) {
	sl.backendLogger.Info(v...)
}

//Infof with format
func (sl *ServiceLogger) Infof(format string, v ...interface{}) {
	sl.backendLogger.Infof(format, v...)
}

//Warning ...
func (sl *ServiceLogger) Warning(v ...interface{}) {
	sl.backendLogger.Warning(v...)
}

//Warningf with format
func (sl *ServiceLogger) Warningf(format string, v ...interface{}) {
	sl.backendLogger.Warningf(format, v...)
}

//Error ...
func (sl *ServiceLogger) Error(v ...interface{}) {
	sl.backendLogger.Error(v...)
}

//Errorf with format
func (sl *ServiceLogger) Errorf(format string, v ...interface{}) {
	sl.backendLogger.Errorf(format, v...)
}

//Fatal error
func (sl *ServiceLogger) Fatal(v ...interface{}) {
	sl.backendLogger.Fatal(v...)
}

//Fatalf error
func (sl *ServiceLogger) Fatalf(format string, v ...interface{}) {
	sl.backendLogger.Fatalf(format, v...)
}
@@ -4,9 +4,9 @@ import (
 	"net/http"

 	common_http "github.com/vmware/harbor/src/common/http"
-	"github.com/vmware/harbor/src/common/utils/log"
 	"github.com/vmware/harbor/src/common/utils/registry/auth"
 	"github.com/vmware/harbor/src/jobservice_v2/env"
+	"github.com/vmware/harbor/src/jobservice_v2/logger"
 )

 // Deleter deletes repository or images on the destination registry
@@ -14,7 +14,7 @@ type Deleter struct {
 	ctx env.JobContext
 	repository *repository
 	dstRegistry *registry
-	logger *log.Logger
+	logger logger.Interface
 	retry bool
 }

@@ -49,8 +49,7 @@ func (d *Deleter) run(ctx env.JobContext, params map[string]interface{}) error {
 }

 func (d *Deleter) init(ctx env.JobContext, params map[string]interface{}) error {
-	// TODO
-	d.logger = log.DefaultLogger()
+	d.logger = ctx.GetLogger()
 	d.ctx = ctx

 	if canceled(d.ctx) {
src/jobservice_v2/job/impl/replication/delete_test.go (new file, 25 lines)

package replication

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestMaxFailsOfDeleter(t *testing.T) {
	d := &Deleter{}
	assert.Equal(t, uint(3), d.MaxFails())
}

func TestValidateOfDeleter(t *testing.T) {
	d := &Deleter{}
	require.Nil(t, d.Validate(nil))
}

func TestShouldRetryOfDeleter(t *testing.T) {
	d := &Deleter{}
	assert.False(t, d.ShouldRetry())
	d.retry = true
	assert.True(t, d.ShouldRetry())
}
@@ -1,49 +1,36 @@
 package replication

 import (
-	"errors"
 	"fmt"
-	"net"
 	"net/http"
-	"os"
-	"strings"

-	"github.com/docker/distribution"
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/distribution/manifest/schema2"
 	common_http "github.com/vmware/harbor/src/common/http"
-	"github.com/vmware/harbor/src/common/http/modifier"
 	"github.com/vmware/harbor/src/common/models"
-	"github.com/vmware/harbor/src/common/utils"
-	"github.com/vmware/harbor/src/common/utils/log"
 	reg "github.com/vmware/harbor/src/common/utils/registry"
 	"github.com/vmware/harbor/src/common/utils/registry/auth"
 	"github.com/vmware/harbor/src/jobservice_v2/env"
-	job_utils "github.com/vmware/harbor/src/jobservice_v2/job/impl/utils"
+	"github.com/vmware/harbor/src/jobservice_v2/logger"
 )

-var (
-	errCanceled = errors.New("the job is canceled")
-)
-
-// Replicator replicates images from source registry to the destination one
+// Replicator call UI's API to start a repliation according to the policy ID
+// passed in parameters
 type Replicator struct {
 	ctx env.JobContext
-	repository *repository
-	srcRegistry *registry
-	dstRegistry *registry
-	logger *log.Logger
-	retry bool
+	url string // the URL of UI service
+	insecure bool
+	policyID int64
+	client *common_http.Client
+	logger logger.Interface
 }

-// ShouldRetry : retry if the error is network error
+// ShouldRetry ...
 func (r *Replicator) ShouldRetry() bool {
-	return r.retry
+	return false
 }

 // MaxFails ...
 func (r *Replicator) MaxFails() uint {
-	return 3
+	return 0
 }

 // Validate ....
@@ -53,296 +40,48 @@ func (r *Replicator) Validate(params map[string]interface{}) error {

 // Run ...
 func (r *Replicator) Run(ctx env.JobContext, params map[string]interface{}) error {
-	err := r.run(ctx, params)
-	r.retry = retry(err)
-	return err
-}
-
-func (r *Replicator) run(ctx env.JobContext, params map[string]interface{}) error {
-	// initialize
 	if err := r.init(ctx, params); err != nil {
 		return err
 	}
-	// try to create project on destination registry
-	if err := r.createProject(); err != nil {
-		return err
-	}
-	// replicate the images
-	for _, tag := range r.repository.tags {
-		digest, manifest, err := r.pullManifest(tag)
-		if err != nil {
-			return err
-		}
-		if err := r.transferLayers(tag, manifest.References()); err != nil {
-			return err
-		}
-		if err := r.pushManifest(tag, digest, manifest); err != nil {
-			return err
-		}
-	}
-
-	return nil
+	return r.replicate()
 }

 func (r *Replicator) init(ctx env.JobContext, params map[string]interface{}) error {
-	// TODO
-	r.logger = log.DefaultLogger()
+	r.logger = ctx.GetLogger()
 	r.ctx = ctx

 	if canceled(r.ctx) {
 		r.logger.Warning(errCanceled.Error())
 		return errCanceled
 	}

-	// init images that need to be replicated
-	r.repository = &repository{
-		name: params["repository"].(string),
-	}
-	if tags, ok := params["tags"]; ok {
-		tgs := tags.([]interface{})
-		for _, tg := range tgs {
-			r.repository.tags = append(r.repository.tags, tg.(string))
-		}
-	}
-
-	var err error
-	// init source registry client
-	srcURL := params["src_registry_url"].(string)
-	srcInsecure := params["src_registry_insecure"].(bool)
-	srcCred := auth.NewCookieCredential(&http.Cookie{
-		Name: models.UISecretCookie,
-		Value: os.Getenv("JOBSERVICE_SECRET"),
-	})
-	srcTokenServiceURL := ""
-	if stsu, ok := params["src_token_service_url"]; ok {
-		srcTokenServiceURL = stsu.(string)
-	}
-
-	if len(srcTokenServiceURL) > 0 {
-		r.srcRegistry, err = initRegistry(srcURL, srcInsecure, srcCred, r.repository.name, srcTokenServiceURL)
-	} else {
-		r.srcRegistry, err = initRegistry(srcURL, srcInsecure, srcCred, r.repository.name)
-	}
-	if err != nil {
-		r.logger.Errorf("failed to create client for source registry: %v", err)
-		return err
-	}
-
-	// init destination registry client
-	dstURL := params["dst_registry_url"].(string)
-	dstInsecure := params["dst_registry_insecure"].(bool)
-	dstCred := auth.NewBasicAuthCredential(
-		params["dst_registry_username"].(string),
-		params["dst_registry_password"].(string))
-	r.dstRegistry, err = initRegistry(dstURL, dstInsecure, dstCred, r.repository.name)
-	if err != nil {
-		r.logger.Errorf("failed to create client for destination registry: %v", err)
-		return err
-	}
-
-	// get the tag list first if it is null
-	if len(r.repository.tags) == 0 {
-		tags, err := r.srcRegistry.ListTag()
-		if err != nil {
-			r.logger.Errorf("an error occurred while listing tags for the source repository: %v", err)
-			return err
-		}
-
-		if len(tags) == 0 {
-			err = fmt.Errorf("empty tag list for repository %s", r.repository.name)
-			r.logger.Error(err)
-			return err
-		}
-		r.repository.tags = tags
-	}
-
-	r.logger.Infof("initialization completed: repository: %s, tags: %v, source registry: URL-%s insecure-%v, destination registry: URL-%s insecure-%v",
-		r.repository.name, r.repository.tags, r.srcRegistry.url, r.srcRegistry.insecure, r.dstRegistry.url, r.dstRegistry.insecure)
+	r.policyID = (int64)(params["policy_id"].(float64))
+	r.url = params["url"].(string)
+	r.insecure = params["insecure"].(bool)
+	cred := auth.NewCookieCredential(&http.Cookie{
+		Name: models.UISecretCookie,
+		Value: secret(),
+	})
+
+	r.client = common_http.NewClient(&http.Client{
+		Transport: reg.GetHTTPTransport(r.insecure),
+	}, cred)
+
+	r.logger.Infof("initialization completed: policy ID: %d, URL: %s, insecure: %v",
+		r.policyID, r.url, r.insecure)

 	return nil
 }

+func (r *Replicator) replicate() error {
+	if err := r.client.Post(fmt.Sprintf("%s/api/replications", r.url), struct {
+		PolicyID int64 `json:"policy_id"`
+	}{
+		PolicyID: r.policyID,
+	}); err != nil {
+		r.logger.Errorf("failed to send the replication request to %s: %v", r.url, err)
+		return err
+	}
+	r.logger.Infof("the replication request has been sent to %s successfully", r.url)
+	return nil
+}
-func initRegistry(url string, insecure bool, credential modifier.Modifier,
-	repository string, tokenServiceURL ...string) (*registry, error) {
-	registry := &registry{
-		url: url,
-		insecure: insecure,
-	}
-
-	// use the same transport for clients connecting to docker registry and Harbor UI
-	transport := reg.GetHTTPTransport(insecure)
-	authorizer := auth.NewStandardTokenAuthorizer(&http.Client{
-		Transport: transport,
-	}, credential, tokenServiceURL...)
-	uam := &job_utils.UserAgentModifier{
-		UserAgent: "harbor-registry-client",
-	}
-	repositoryClient, err := reg.NewRepository(repository, url,
-		&http.Client{
-			Transport: reg.NewTransport(transport, authorizer, uam),
-		})
-	if err != nil {
-		return nil, err
-	}
-	registry.Repository = *repositoryClient
-
-	registry.client = common_http.NewClient(
-		&http.Client{
-			Transport: transport,
-		}, credential)
-	return registry, nil
-}
-
-func (r *Replicator) createProject() error {
-	if canceled(r.ctx) {
-		r.logger.Warning(errCanceled.Error())
-		return errCanceled
-	}
-	p, _ := utils.ParseRepository(r.repository.name)
-	project, err := r.srcRegistry.GetProject(p)
-	if err != nil {
-		r.logger.Errorf("failed to get project %s from source registry: %v", p, err)
-		return err
-	}
-
-	if err = r.dstRegistry.CreateProject(project); err != nil {
-		// other jobs may be also doing the same thing when the current job
-		// is creating project or the project has already exist, so when the
-		// response code is 409, continue to do next step
-		if e, ok := err.(*common_http.Error); ok && e.Code == http.StatusConflict {
-			r.logger.Warningf("the status code is 409 when creating project %s on destination registry, try to do next step", p)
-			return nil
-		}
-
-		r.logger.Errorf("an error occurred while creating project %s on destination registry: %v", p, err)
-		return err
-	}
-	r.logger.Infof("project %s is created on destination registry", p)
-	return nil
-}
-
-func (r *Replicator) pullManifest(tag string) (string, distribution.Manifest, error) {
-	if canceled(r.ctx) {
-		r.logger.Warning(errCanceled.Error())
-		return "", nil, errCanceled
-	}
-
-	acceptMediaTypes := []string{schema1.MediaTypeManifest, schema2.MediaTypeManifest}
-	digest, mediaType, payload, err := r.srcRegistry.PullManifest(tag, acceptMediaTypes)
-	if err != nil {
-		r.logger.Errorf("an error occurred while pulling manifest of %s:%s from source registry: %v",
-			r.repository.name, tag, err)
-		return "", nil, err
-	}
-	r.logger.Infof("manifest of %s:%s pulled successfully from source registry: %s",
-		r.repository.name, tag, digest)
-
-	if strings.Contains(mediaType, "application/json") {
-		mediaType = schema1.MediaTypeManifest
-	}
-
-	manifest, _, err := reg.UnMarshal(mediaType, payload)
-	if err != nil {
-		r.logger.Errorf("an error occurred while parsing manifest: %v", err)
-		return "", nil, err
-	}
-
-	return digest, manifest, nil
-}
-
-func (r *Replicator) transferLayers(tag string, blobs []distribution.Descriptor) error {
-	repository := r.repository.name
-
-	// all blobs(layers and config)
-	for _, blob := range blobs {
-		if canceled(r.ctx) {
-			r.logger.Warning(errCanceled.Error())
-			return errCanceled
-		}
-
-		digest := blob.Digest.String()
-		exist, err := r.dstRegistry.BlobExist(digest)
-		if err != nil {
-			r.logger.Errorf("an error occurred while checking existence of blob %s of %s:%s on destination registry: %v",
-				digest, repository, tag, err)
-			return err
-		}
-		if exist {
-			r.logger.Infof("blob %s of %s:%s already exists on the destination registry, skip",
-				digest, repository, tag)
-			continue
-		}
-
-		r.logger.Infof("transferring blob %s of %s:%s to the destination registry ...",
-			digest, repository, tag)
-		size, data, err := r.srcRegistry.PullBlob(digest)
-		if err != nil {
-			r.logger.Errorf("an error occurred while pulling blob %s of %s:%s from the source registry: %v",
-				digest, repository, tag, err)
-			return err
-		}
-		if data != nil {
-			defer data.Close()
-		}
-		if err = r.dstRegistry.PushBlob(digest, size, data); err != nil {
-			r.logger.Errorf("an error occurred while pushing blob %s of %s:%s to the distination registry: %v",
-				digest, repository, tag, err)
-			return err
-		}
-		r.logger.Infof("blob %s of %s:%s transferred to the destination registry completed",
-			digest, repository, tag)
-	}
-
-	return nil
-}
-
-func (r *Replicator) pushManifest(tag, digest string, manifest distribution.Manifest) error {
-	if canceled(r.ctx) {
-		r.logger.Warning(errCanceled.Error())
-		return errCanceled
-	}
-
-	repository := r.repository.name
-	_, exist, err := r.dstRegistry.ManifestExist(digest)
-	if err != nil {
-		r.logger.Warningf("an error occurred while checking the existence of manifest of %s:%s on the destination registry: %v, try to push manifest",
-			repository, tag, err)
-	} else {
-		if exist {
-			r.logger.Infof("manifest of %s:%s exists on the destination registry, skip manifest pushing",
-				repository, tag)
-			return nil
-		}
-	}
-
-	mediaType, data, err := manifest.Payload()
-	if err != nil {
-		r.logger.Errorf("an error occurred while getting payload of manifest for %s:%s : %v",
-			repository, tag, err)
-		return err
-	}
-
-	if _, err = r.dstRegistry.PushManifest(tag, mediaType, data); err != nil {
-		r.logger.Errorf("an error occurred while pushing manifest of %s:%s to the destination registry: %v",
-			repository, tag, err)
-		return err
-	}
-	r.logger.Infof("manifest of %s:%s has been pushed to the destination registry",
-		repository, tag)
-
-	return nil
-}
-
-func canceled(ctx env.JobContext) bool {
-	_, canceled := ctx.OPCommand()
-	return canceled
-}
-
-func retry(err error) bool {
-	if err == nil {
-		return false
-	}
-	_, ok := err.(net.Error)
-	return ok
-}
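The rewritten Replicator above no longer copies images itself; it only asks the Harbor UI to start a replication for a policy ID. A rough, self-contained sketch of the equivalent HTTP request follows (not part of the commit; the cookie name "secret", the example URL, and the error handling are assumptions, while the endpoint, JSON payload, and JOBSERVICE_SECRET source come from the diff above):

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
		"net/http"
		"os"
	)

	// triggerReplication posts the policy ID to the UI's replication API,
	// authenticating with the job-service secret cookie.
	func triggerReplication(uiURL string, policyID int64) error {
		payload, err := json.Marshal(struct {
			PolicyID int64 `json:"policy_id"`
		}{PolicyID: policyID})
		if err != nil {
			return err
		}
		req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/api/replications", uiURL), bytes.NewReader(payload))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/json")
		// cookie name assumed; the diff uses models.UISecretCookie
		req.AddCookie(&http.Cookie{Name: "secret", Value: os.Getenv("JOBSERVICE_SECRET")})
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode >= http.StatusMultipleChoices {
			return fmt.Errorf("replication request to %s failed: %s", uiURL, resp.Status)
		}
		return nil
	}

	func main() {
		if err := triggerReplication("http://ui:8080", 1); err != nil {
			fmt.Println(err)
		}
	}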
@@ -7,19 +7,17 @@ import (
 	"github.com/stretchr/testify/require"
 )

-func TestMaxFails(t *testing.T) {
+func TestMaxFailsOfReplicator(t *testing.T) {
 	r := &Replicator{}
-	assert.Equal(t, uint(3), r.MaxFails())
+	assert.Equal(t, uint(0), r.MaxFails())
 }

-func TestValidate(t *testing.T) {
+func TestValidateOfReplicator(t *testing.T) {
 	r := &Replicator{}
 	require.Nil(t, r.Validate(nil))
 }

-func TestShouldRetry(t *testing.T) {
+func TestShouldRetryOfReplicator(t *testing.T) {
 	r := &Replicator{}
-	assert.False(t, r.retry)
-	r.retry = true
-	assert.True(t, r.retry)
+	assert.False(t, r.ShouldRetry())
 }
src/jobservice_v2/job/impl/replication/transfer.go (new file, 351 lines)

package replication

import (
	"errors"
	"fmt"
	"net"
	"net/http"
	"os"
	"strings"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	common_http "github.com/vmware/harbor/src/common/http"
	"github.com/vmware/harbor/src/common/http/modifier"
	"github.com/vmware/harbor/src/common/models"
	"github.com/vmware/harbor/src/common/utils"
	reg "github.com/vmware/harbor/src/common/utils/registry"
	"github.com/vmware/harbor/src/common/utils/registry/auth"
	"github.com/vmware/harbor/src/jobservice_v2/env"
	job_utils "github.com/vmware/harbor/src/jobservice_v2/job/impl/utils"
	"github.com/vmware/harbor/src/jobservice_v2/logger"
)

var (
	errCanceled = errors.New("the job is canceled")
)

// Transfer images from source registry to the destination one
type Transfer struct {
	ctx env.JobContext
	repository *repository
	srcRegistry *registry
	dstRegistry *registry
	logger logger.Interface
	retry bool
}

// ShouldRetry : retry if the error is network error
func (t *Transfer) ShouldRetry() bool {
	return t.retry
}

// MaxFails ...
func (t *Transfer) MaxFails() uint {
	return 3
}

// Validate ....
func (t *Transfer) Validate(params map[string]interface{}) error {
	return nil
}

// Run ...
func (t *Transfer) Run(ctx env.JobContext, params map[string]interface{}) error {
	err := t.run(ctx, params)
	t.retry = retry(err)
	return err
}

func (t *Transfer) run(ctx env.JobContext, params map[string]interface{}) error {
	// initialize
	if err := t.init(ctx, params); err != nil {
		return err
	}
	// try to create project on destination registry
	if err := t.createProject(); err != nil {
		return err
	}
	// replicate the images
	for _, tag := range t.repository.tags {
		digest, manifest, err := t.pullManifest(tag)
		if err != nil {
			return err
		}
		if err := t.transferLayers(tag, manifest.References()); err != nil {
			return err
		}
		if err := t.pushManifest(tag, digest, manifest); err != nil {
			return err
		}
	}

	return nil
}

func (t *Transfer) init(ctx env.JobContext, params map[string]interface{}) error {
	t.logger = ctx.GetLogger()
	t.ctx = ctx

	if canceled(t.ctx) {
		t.logger.Warning(errCanceled.Error())
		return errCanceled
	}

	// init images that need to be replicated
	t.repository = &repository{
		name: params["repository"].(string),
	}
	if tags, ok := params["tags"]; ok {
		tgs := tags.([]interface{})
		for _, tg := range tgs {
			t.repository.tags = append(t.repository.tags, tg.(string))
		}
	}

	var err error
	// init source registry client
	srcURL := params["src_registry_url"].(string)
	srcInsecure := params["src_registry_insecure"].(bool)
	srcCred := auth.NewCookieCredential(&http.Cookie{
		Name: models.UISecretCookie,
		Value: secret(),
	})
	srcTokenServiceURL := ""
	if stsu, ok := params["src_token_service_url"]; ok {
		srcTokenServiceURL = stsu.(string)
	}

	if len(srcTokenServiceURL) > 0 {
		t.srcRegistry, err = initRegistry(srcURL, srcInsecure, srcCred, t.repository.name, srcTokenServiceURL)
	} else {
		t.srcRegistry, err = initRegistry(srcURL, srcInsecure, srcCred, t.repository.name)
	}
	if err != nil {
		t.logger.Errorf("failed to create client for source registry: %v", err)
		return err
	}

	// init destination registry client
	dstURL := params["dst_registry_url"].(string)
	dstInsecure := params["dst_registry_insecure"].(bool)
	dstCred := auth.NewBasicAuthCredential(
		params["dst_registry_username"].(string),
		params["dst_registry_password"].(string))
	t.dstRegistry, err = initRegistry(dstURL, dstInsecure, dstCred, t.repository.name)
	if err != nil {
		t.logger.Errorf("failed to create client for destination registry: %v", err)
		return err
	}

	// get the tag list first if it is null
	if len(t.repository.tags) == 0 {
		tags, err := t.srcRegistry.ListTag()
		if err != nil {
			t.logger.Errorf("an error occurred while listing tags for the source repository: %v", err)
			return err
		}

		if len(tags) == 0 {
			err = fmt.Errorf("empty tag list for repository %s", t.repository.name)
			t.logger.Error(err)
			return err
		}
		t.repository.tags = tags
	}

	t.logger.Infof("initialization completed: repository: %s, tags: %v, source registry: URL-%s insecure-%v, destination registry: URL-%s insecure-%v",
		t.repository.name, t.repository.tags, t.srcRegistry.url, t.srcRegistry.insecure, t.dstRegistry.url, t.dstRegistry.insecure)

	return nil
}

func initRegistry(url string, insecure bool, credential modifier.Modifier,
	repository string, tokenServiceURL ...string) (*registry, error) {
	registry := &registry{
		url: url,
		insecure: insecure,
	}

	// use the same transport for clients connecting to docker registry and Harbor UI
	transport := reg.GetHTTPTransport(insecure)

	authorizer := auth.NewStandardTokenAuthorizer(&http.Client{
		Transport: transport,
	}, credential, tokenServiceURL...)
	uam := &job_utils.UserAgentModifier{
		UserAgent: "harbor-registry-client",
	}
	repositoryClient, err := reg.NewRepository(repository, url,
		&http.Client{
			Transport: reg.NewTransport(transport, authorizer, uam),
		})
	if err != nil {
		return nil, err
	}
	registry.Repository = *repositoryClient

	registry.client = common_http.NewClient(
		&http.Client{
			Transport: transport,
		}, credential)
	return registry, nil
}

func (t *Transfer) createProject() error {
	if canceled(t.ctx) {
		t.logger.Warning(errCanceled.Error())
		return errCanceled
	}
	p, _ := utils.ParseRepository(t.repository.name)
	project, err := t.srcRegistry.GetProject(p)
	if err != nil {
		t.logger.Errorf("failed to get project %s from source registry: %v", p, err)
		return err
	}

	if err = t.dstRegistry.CreateProject(project); err != nil {
		// other jobs may be also doing the same thing when the current job
		// is creating project or the project has already exist, so when the
		// response code is 409, continue to do next step
		if e, ok := err.(*common_http.Error); ok && e.Code == http.StatusConflict {
			t.logger.Warningf("the status code is 409 when creating project %s on destination registry, try to do next step", p)
			return nil
		}

		t.logger.Errorf("an error occurred while creating project %s on destination registry: %v", p, err)
		return err
	}
	t.logger.Infof("project %s is created on destination registry", p)
	return nil
}

func (t *Transfer) pullManifest(tag string) (string, distribution.Manifest, error) {
	if canceled(t.ctx) {
		t.logger.Warning(errCanceled.Error())
		return "", nil, errCanceled
	}

	acceptMediaTypes := []string{schema1.MediaTypeManifest, schema2.MediaTypeManifest}
	digest, mediaType, payload, err := t.srcRegistry.PullManifest(tag, acceptMediaTypes)
	if err != nil {
		t.logger.Errorf("an error occurred while pulling manifest of %s:%s from source registry: %v",
			t.repository.name, tag, err)
		return "", nil, err
	}
	t.logger.Infof("manifest of %s:%s pulled successfully from source registry: %s",
		t.repository.name, tag, digest)

	if strings.Contains(mediaType, "application/json") {
		mediaType = schema1.MediaTypeManifest
	}

	manifest, _, err := reg.UnMarshal(mediaType, payload)
	if err != nil {
		t.logger.Errorf("an error occurred while parsing manifest: %v", err)
		return "", nil, err
	}

	return digest, manifest, nil
}

func (t *Transfer) transferLayers(tag string, blobs []distribution.Descriptor) error {
	repository := t.repository.name

	// all blobs(layers and config)
	for _, blob := range blobs {
		if canceled(t.ctx) {
			t.logger.Warning(errCanceled.Error())
			return errCanceled
		}

		digest := blob.Digest.String()
		exist, err := t.dstRegistry.BlobExist(digest)
		if err != nil {
			t.logger.Errorf("an error occurred while checking existence of blob %s of %s:%s on destination registry: %v",
				digest, repository, tag, err)
			return err
		}
		if exist {
			t.logger.Infof("blob %s of %s:%s already exists on the destination registry, skip",
				digest, repository, tag)
			continue
		}

		t.logger.Infof("transferring blob %s of %s:%s to the destination registry ...",
			digest, repository, tag)
		size, data, err := t.srcRegistry.PullBlob(digest)
		if err != nil {
			t.logger.Errorf("an error occurred while pulling blob %s of %s:%s from the source registry: %v",
				digest, repository, tag, err)
			return err
		}
		if data != nil {
			defer data.Close()
		}
		if err = t.dstRegistry.PushBlob(digest, size, data); err != nil {
			t.logger.Errorf("an error occurred while pushing blob %s of %s:%s to the distination registry: %v",
				digest, repository, tag, err)
			return err
		}
		t.logger.Infof("blob %s of %s:%s transferred to the destination registry completed",
			digest, repository, tag)
	}

	return nil
}

func (t *Transfer) pushManifest(tag, digest string, manifest distribution.Manifest) error {
	if canceled(t.ctx) {
		t.logger.Warning(errCanceled.Error())
		return errCanceled
	}

	repository := t.repository.name
	_, exist, err := t.dstRegistry.ManifestExist(digest)
	if err != nil {
		t.logger.Warningf("an error occurred while checking the existence of manifest of %s:%s on the destination registry: %v, try to push manifest",
			repository, tag, err)
	} else {
		if exist {
			t.logger.Infof("manifest of %s:%s exists on the destination registry, skip manifest pushing",
				repository, tag)
			return nil
		}
	}

	mediaType, data, err := manifest.Payload()
	if err != nil {
		t.logger.Errorf("an error occurred while getting payload of manifest for %s:%s : %v",
			repository, tag, err)
		return err
	}

	if _, err = t.dstRegistry.PushManifest(tag, mediaType, data); err != nil {
		t.logger.Errorf("an error occurred while pushing manifest of %s:%s to the destination registry: %v",
			repository, tag, err)
		return err
	}
	t.logger.Infof("manifest of %s:%s has been pushed to the destination registry",
		repository, tag)

	return nil
}

func canceled(ctx env.JobContext) bool {
	_, canceled := ctx.OPCommand()
	return canceled
}

func retry(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(net.Error)
	return ok
}

func secret() string {
	return os.Getenv("JOBSERVICE_SECRET")
}
src/jobservice_v2/job/impl/replication/transfer_test.go (new file, 25 lines)

package replication

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestMaxFailsOfTransfer(t *testing.T) {
	r := &Transfer{}
	assert.Equal(t, uint(3), r.MaxFails())
}

func TestValidateOfTransfer(t *testing.T) {
	r := &Transfer{}
	require.Nil(t, r.Validate(nil))
}

func TestShouldRetryOfTransfer(t *testing.T) {
	r := &Transfer{}
	assert.False(t, r.ShouldRetry())
	r.retry = true
	assert.True(t, r.ShouldRetry())
}
src/jobservice_v2/logger/service_logger.go (new file, 115 lines)

// Copyright 2018 The Harbor Authors. All rights reserved.

package logger

import (
	"log"
)

//sLogger is used to log for workerpool itself
var sLogger Interface

//SetLogger sets the logger implementation
func SetLogger(logger Interface) {
	sLogger = logger
}

//Debug ...
func Debug(v ...interface{}) {
	if sLogger != nil {
		sLogger.Debug(v...)
		return
	}

	log.Println(v...)
}

//Debugf for debuging with format
func Debugf(format string, v ...interface{}) {
	if sLogger != nil {
		sLogger.Debugf(format, v...)
		return
	}

	log.Printf(format, v...)
}

//Info ...
func Info(v ...interface{}) {
	if sLogger != nil {
		sLogger.Info(v...)
		return
	}

	log.Println(v...)
}

//Infof for logging info with format
func Infof(format string, v ...interface{}) {
	if sLogger != nil {
		sLogger.Infof(format, v...)
		return
	}

	log.Printf(format, v...)
}

//Warning ...
func Warning(v ...interface{}) {
	if sLogger != nil {
		sLogger.Warning(v...)
		return
	}

	log.Println(v...)
}

//Warningf for warning with format
func Warningf(format string, v ...interface{}) {
	if sLogger != nil {
		sLogger.Warningf(format, v...)
		return
	}

	log.Printf(format, v...)
}

//Error for logging error
func Error(v ...interface{}) {
	if sLogger != nil {
		sLogger.Error(v...)
		return
	}

	log.Println(v...)
}

//Errorf for logging error with format
func Errorf(format string, v ...interface{}) {
	if sLogger != nil {
		sLogger.Errorf(format, v...)
		return
	}

	log.Printf(format, v...)
}

//Fatal ...
func Fatal(v ...interface{}) {
	if sLogger != nil {
		sLogger.Fatal(v...)
		return
	}

	log.Fatal(v...)
}

//Fatalf for fatal error with error
func Fatalf(format string, v ...interface{}) {
	if sLogger != nil {
		sLogger.Fatalf(format, v...)
		return
	}

	log.Fatalf(format, v...)
}
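For reference, a minimal sketch (not part of this commit) of how this package-level logger is meant to be wired and used; it mirrors the main.go and call-site changes shown below, while the "INFO" level string and the example messages are illustrative only:

	package main

	import (
		ilogger "github.com/vmware/harbor/src/jobservice_v2/job/impl/logger"
		"github.com/vmware/harbor/src/jobservice_v2/logger"
	)

	func main() {
		// install the workerpool logger once at startup (level string assumed)
		logger.SetLogger(ilogger.NewServiceLogger("INFO"))

		// components then log through the package-level functions;
		// before SetLogger is called they fall back to the standard library log package
		logger.Info("Redis job stats manager is started")
		logger.Errorf("failed to process '%s' request: %s", "report_status", "example error")
	}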
@@ -8,8 +8,6 @@ import (
 	"io/ioutil"
 	"os"
 	"time"
-
-	"github.com/vmware/harbor/src/common/utils/log"
 )

 const (
@@ -31,7 +29,7 @@ func NewSweeper(ctx context.Context, workDir string, period uint) *Sweeper {
 //Start to work
 func (s *Sweeper) Start() {
 	go s.loop()
-	log.Info("Logger sweeper is started")
+	Info("Logger sweeper is started")
 }

 func (s *Sweeper) loop() {
@@ -41,7 +39,7 @@ func (s *Sweeper) loop() {
 	}

 	defer func() {
-		log.Info("Logger sweeper is stopped")
+		Info("Logger sweeper is stopped")
 	}()

 	//First run
@@ -66,14 +64,14 @@ func (s *Sweeper) clear() {
 		count = &cleared
 	)

-	log.Info("Start to clear the job outdated log files")
+	Info("Start to clear the job outdated log files")
 	defer func() {
-		log.Infof("%d job outdated log files cleared", *count)
+		Infof("%d job outdated log files cleared", *count)
 	}()

 	logFiles, err := ioutil.ReadDir(s.workDir)
 	if err != nil {
-		log.Errorf("Failed to get the outdated log files under '%s' with error: %s\n", s.workDir, err)
+		Errorf("Failed to get the outdated log files under '%s' with error: %s\n", s.workDir, err)
 		return
 	}
 	if len(logFiles) == 0 {
@@ -86,7 +84,7 @@ func (s *Sweeper) clear() {
 		if err := os.Remove(logFilePath); err == nil {
 			cleared++
 		} else {
-			log.Warningf("Failed to remove log file '%s'\n", logFilePath)
+			Warningf("Failed to remove log file '%s'\n", logFilePath)
 		}
 	}
 }
@@ -9,6 +9,8 @@ import (
 	"github.com/vmware/harbor/src/jobservice_v2/config"
 	"github.com/vmware/harbor/src/jobservice_v2/env"
 	"github.com/vmware/harbor/src/jobservice_v2/job/impl"
+	ilogger "github.com/vmware/harbor/src/jobservice_v2/job/impl/logger"
+	"github.com/vmware/harbor/src/jobservice_v2/logger"
 	"github.com/vmware/harbor/src/jobservice_v2/runtime"
 	"github.com/vmware/harbor/src/jobservice_v2/utils"
 )
@@ -25,6 +27,12 @@ func main() {
 		return
 	}

+	//Load configurations
+	if err := config.DefaultConfig.Load(*configPath, true); err != nil {
+		fmt.Printf("Failed to load configurations with error: %s\n", err)
+		return
+	}
+
 	//Set job context initializer
 	runtime.JobService.SetJobContextInitializer(func(ctx *env.Context) (env.JobContext, error) {
 		secret := config.GetAuthSecret()
@@ -42,6 +50,10 @@ func main() {
 		return jobCtx, nil
 	})

+	//New logger for job service
+	sLogger := ilogger.NewServiceLogger(config.GetLogLevel())
+	logger.SetLogger(sLogger)
+
 	//Start
-	runtime.JobService.LoadAndRun(*configPath, true)
+	runtime.JobService.LoadAndRun()
 }
@@ -13,6 +13,7 @@ import (
 	"time"

 	"github.com/vmware/harbor/src/jobservice_v2/errs"
+	"github.com/vmware/harbor/src/jobservice_v2/logger"

 	"github.com/vmware/harbor/src/jobservice_v2/period"

@@ -21,7 +22,6 @@ import (
 	"github.com/gocraft/work"

 	"github.com/garyburd/redigo/redis"
-	"github.com/vmware/harbor/src/common/utils/log"
 	"github.com/vmware/harbor/src/jobservice_v2/job"
 	"github.com/vmware/harbor/src/jobservice_v2/models"
 	"github.com/vmware/harbor/src/jobservice_v2/utils"
@@ -94,7 +94,7 @@ func (rjs *RedisJobStatsManager) Start() {
 	go rjs.loop()
 	rjs.isRunning = true

-	log.Info("Redis job stats manager is started")
+	logger.Info("Redis job stats manager is started")
 }

 //Shutdown is implementation of same method in JobStatsManager interface.
@@ -152,7 +152,7 @@ func (rjs *RedisJobStatsManager) loop() {
 		rjs.isRunning = false
 		//Notify other sub goroutines
 		close(controlChan)
-		log.Info("Redis job stats manager is stopped")
+		logger.Info("Redis job stats manager is stopped")
 	}()

 	for {
@@ -176,7 +176,7 @@ func (rjs *RedisJobStatsManager) loop() {
 					}
 				}()
 			} else {
-				log.Warningf("Failed to process '%s' request with error: %s (%d times tried)\n", item.op, err, maxFails)
+				logger.Warningf("Failed to process '%s' request with error: %s (%d times tried)\n", item.op, err, maxFails)
 				if item.op == opReportStatus {
 					clearHookCache = true
 				}
@@ -238,7 +238,7 @@ func (rjs *RedisJobStatsManager) Stop(jobID string) error {
 		//thirdly expire the job stats of this periodic job if exists
 		if err := rjs.expirePeriodicJobStats(theJob.Stats.JobID); err != nil {
 			//only logged
-			log.Errorf("Expire the stats of job %s failed with error: %s\n", theJob.Stats.JobID, err)
+			logger.Errorf("Expire the stats of job %s failed with error: %s\n", theJob.Stats.JobID, err)
 		}
 	default:
 		break
@@ -378,7 +378,7 @@ func (rjs *RedisJobStatsManager) submitStatusReportingItem(jobID string, status,
 		hookURL, err = rjs.getHook(jobID)
 		if err != nil {
 			//logged and exit
-			log.Warningf("no status hook found for job %s\n, abandon status reporting", jobID)
+			logger.Warningf("no status hook found for job %s\n, abandon status reporting", jobID)
 			return
 		}
 	}
@@ -418,7 +418,7 @@ func (rjs *RedisJobStatsManager) expirePeriodicJobStats(jobID string) error {
 func (rjs *RedisJobStatsManager) deleteScheduledJobsOfPeriodicPolicy(policyID string, cronSpec string) error {
 	schedule, err := cron.Parse(cronSpec)
 	if err != nil {
-		log.Errorf("cron spec '%s' is not valid", cronSpec)
+		logger.Errorf("cron spec '%s' is not valid", cronSpec)
 		return err
 	}

@@ -432,7 +432,7 @@ func (rjs *RedisJobStatsManager) deleteScheduledJobsOfPeriodicPolicy(policyID st
 		epoch := t.Unix()
 		if err = rjs.client.DeleteScheduledJob(epoch, policyID); err != nil {
 			//only logged
-			log.Warningf("delete scheduled instance for periodic job %s failed with error: %s\n", policyID, err)
+			logger.Warningf("delete scheduled instance for periodic job %s failed with error: %s\n", policyID, err)
 		}
 	}

@@ -453,7 +453,7 @@ func (rjs *RedisJobStatsManager) getCrlCommand(jobID string) (string, error) {
 	_, err = conn.Do("DEL", key)
 	if err != nil {
 		//only logged
-		log.Errorf("del key %s failed with error: %s\n", key, err)
+		logger.Errorf("del key %s failed with error: %s\n", key, err)
 	}

 	return cmd, nil
@@ -9,8 +9,8 @@ import (
 	"github.com/garyburd/redigo/redis"
 	"github.com/gocraft/work"
 	"github.com/robfig/cron"
-	"github.com/vmware/harbor/src/common/utils/log"
 	"github.com/vmware/harbor/src/jobservice_v2/job"
+	"github.com/vmware/harbor/src/jobservice_v2/logger"
 	"github.com/vmware/harbor/src/jobservice_v2/utils"
 )

@@ -52,7 +52,7 @@ func newPeriodicEnqueuer(namespace string, pool *redis.Pool, policyStore *period

 func (pe *periodicEnqueuer) start() {
 	go pe.loop()
-	log.Info("Periodic enqueuer is started")
+	logger.Info("Periodic enqueuer is started")
 }

 func (pe *periodicEnqueuer) stop() {
@@ -62,7 +62,7 @@ func (pe *periodicEnqueuer) stop() {

 func (pe *periodicEnqueuer) loop() {
 	defer func() {
-		log.Info("Periodic enqueuer is stopped")
+		logger.Info("Periodic enqueuer is stopped")
 	}()
 	// Begin reaping periodically
 	timer := time.NewTimer(periodicEnqueuerSleep + time.Duration(rand.Intn(30))*time.Second)
@@ -71,7 +71,7 @@ func (pe *periodicEnqueuer) loop() {
 	if pe.shouldEnqueue() {
 		err := pe.enqueue()
 		if err != nil {
-			log.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
+			logger.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
 		}
 	}

@@ -85,7 +85,7 @@ func (pe *periodicEnqueuer) loop() {
 			if pe.shouldEnqueue() {
 				err := pe.enqueue()
 				if err != nil {
-					log.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
+					logger.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
 				}
 			}
 		}
@@ -133,7 +133,7 @@ func (pe *periodicEnqueuer) enqueue() error {
 				return err
 			}

-			log.Infof("Schedule job %s for policy %s at %d\n", pj.jobName, pl.PolicyID, epoch)
+			logger.Infof("Schedule job %s for policy %s at %d\n", pj.jobName, pl.PolicyID, epoch)
 		}
 		//Directly use redis conn to update the periodic job (policy) status
 		//Do not care the result
@@ -153,7 +153,7 @@ func (pe *periodicEnqueuer) shouldEnqueue() bool {
 	if err == redis.ErrNil {
 		return true
 	} else if err != nil {
-		log.Errorf("periodic_enqueuer.should_enqueue:%s\n", err)
+		logger.Errorf("periodic_enqueuer.should_enqueue:%s\n", err)
 		return true
 	}

@@ -12,8 +12,8 @@ import (
 	"github.com/robfig/cron"

 	"github.com/garyburd/redigo/redis"
-	"github.com/vmware/harbor/src/common/utils/log"
 	"github.com/vmware/harbor/src/jobservice_v2/env"
+	"github.com/vmware/harbor/src/jobservice_v2/logger"
 	"github.com/vmware/harbor/src/jobservice_v2/models"
 	"github.com/vmware/harbor/src/jobservice_v2/utils"
 )
@@ -54,7 +54,7 @@ func NewRedisPeriodicScheduler(ctx *env.Context, namespace string, redisPool *re
 //Start to serve
 func (rps *RedisPeriodicScheduler) Start() {
 	defer func() {
-		log.Info("Redis scheduler is stopped")
+		logger.Info("Redis scheduler is stopped")
 	}()

 	//Load existing periodic job policies
@@ -67,7 +67,7 @@ func (rps *RedisPeriodicScheduler) Start() {
 	//start enqueuer
 	rps.enqueuer.start()
 	defer rps.enqueuer.stop()
-	log.Info("Redis scheduler is started")
+	logger.Info("Redis scheduler is started")

 	//blocking here
 	<-rps.context.SystemContext.Done()
@@ -234,14 +234,14 @@ func (rps *RedisPeriodicScheduler) Load() error {
 		if err := policy.DeSerialize(rawPolicy); err != nil {
 			//Ignore error which means the policy data is not valid
 			//Only logged
-			log.Warningf("failed to deserialize periodic policy with error:%s; raw data: %s\n", err, rawPolicy)
+			logger.Warningf("failed to deserialize periodic policy with error:%s; raw data: %s\n", err, rawPolicy)
 			continue
 		}
 		score, err := strconv.ParseInt(string(rawScore), 10, 64)
 		if err != nil {
 			//Ignore error which means the policy data is not valid
 			//Only logged
-			log.Warningf("failed to parse the score of the periodic policy with error:%s\n", err)
+			logger.Warningf("failed to parse the score of the periodic policy with error:%s\n", err)
 			continue
 		}

@@ -261,7 +261,7 @@ func (rps *RedisPeriodicScheduler) Load() error {
 		rps.pstore.addAll(allPeriodicPolicies)
 	}

-	log.Infof("Load %d periodic job policies", len(allPeriodicPolicies))
+	logger.Infof("Load %d periodic job policies", len(allPeriodicPolicies))
 	return nil
 }

@@ -9,7 +9,7 @@ import (
 	"github.com/gocraft/work"

 	"github.com/garyburd/redigo/redis"
-	"github.com/vmware/harbor/src/common/utils/log"
+	"github.com/vmware/harbor/src/jobservice_v2/logger"
 	"github.com/vmware/harbor/src/jobservice_v2/utils"
 )

@@ -49,7 +49,7 @@ func (s *Sweeper) ClearOutdatedScheduledJobs() error {

 	if r == nil {
 		//Action is already locked by other workerpool
-		log.Info("Ignore clear outdated scheduled jobs")
+		logger.Info("Ignore clear outdated scheduled jobs")
 		return nil
 	}

@@ -71,7 +71,7 @@ func (s *Sweeper) ClearOutdatedScheduledJobs() error {
 			allErrors = append(allErrors, err)
 		}

-		log.Infof("Clear outdated scheduled job: %s run at %#v\n", j.ID, time.Unix(jobScore.Score, 0).String())
+		logger.Infof("Clear outdated scheduled job: %s run at %#v\n", j.ID, time.Unix(jobScore.Score, 0).String())
 	}

 	//Unlock
@@ -9,11 +9,11 @@ import (
 	"reflect"
 	"time"

+	"github.com/vmware/harbor/src/jobservice_v2/logger"
 	"github.com/vmware/harbor/src/jobservice_v2/opm"
 	"github.com/vmware/harbor/src/jobservice_v2/period"

 	"github.com/garyburd/redigo/redis"
-	"github.com/vmware/harbor/src/common/utils/log"
 	"github.com/vmware/harbor/src/jobservice_v2/models"
 	"github.com/vmware/harbor/src/jobservice_v2/utils"
 )
@@ -39,7 +39,7 @@ func NewMessageServer(ctx context.Context, namespace string, redisPool *redis.Po
 //Start to serve
 func (ms *MessageServer) Start() error {
 	defer func() {
-		log.Info("Message server is stopped")
+		logger.Info("Message server is stopped")
 	}()

 	//As we get one connection from the pool, don't try to close it.
@@ -66,11 +66,11 @@ func (ms *MessageServer) Start() error {
 				m := &models.Message{}
 				if err := json.Unmarshal(res.Data, m); err != nil {
 					//logged
-					log.Warningf("read invalid message: %s\n", res.Data)
+					logger.Warningf("read invalid message: %s\n", res.Data)
 				}
 				if callback, ok := ms.callbacks[m.Event]; !ok {
 					//logged
-					log.Warningf("no handler to handle event %s\n", m.Event)
+					logger.Warningf("no handler to handle event %s\n", m.Event)
 				} else {
 					//Try to recover the concrete type
 					var converted interface{}
@@ -95,17 +95,17 @@
 					if e != nil {
 						err := e.(error)
 						//logged
-						log.Errorf("failed to fire callback with error: %s\n", err)
+						logger.Errorf("failed to fire callback with error: %s\n", err)
 					}
 				}
 			case redis.Subscription:
 				switch res.Kind {
 				case "subscribe":
-					log.Infof("Subscribe redis channel %s\n", res.Channel)
+					logger.Infof("Subscribe redis channel %s\n", res.Channel)
 					break
 				case "unsubscribe":
 					//Unsubscribe all, means main goroutine is exiting
-					log.Infof("Unsubscribe redis channel %s\n", res.Channel)
+					logger.Infof("Unsubscribe redis channel %s\n", res.Channel)
 					done <- nil
 					return
 				}
@@ -113,7 +113,7 @@ func (ms *MessageServer) Start() error {
 		}
 	}()

-	log.Info("Message server is started")
+	logger.Info("Message server is started")

 	ticker := time.NewTicker(time.Minute)
 	defer ticker.Stop()
@@ -9,9 +9,9 @@ import (

 	"github.com/garyburd/redigo/redis"
 	"github.com/gocraft/work"
-	"github.com/vmware/harbor/src/common/utils/log"
 	"github.com/vmware/harbor/src/jobservice_v2/env"
 	"github.com/vmware/harbor/src/jobservice_v2/job"
+	"github.com/vmware/harbor/src/jobservice_v2/logger"
 	"github.com/vmware/harbor/src/jobservice_v2/models"
 	"github.com/vmware/harbor/src/jobservice_v2/opm"
 	"github.com/vmware/harbor/src/jobservice_v2/period"
@@ -19,10 +19,6 @@ import (
 )

 var (
-	dialConnectionTimeout = 30 * time.Second
-	healthCheckPeriod     = time.Minute
-	dialReadTimeout       = healthCheckPeriod + 10*time.Second
-	dialWriteTimeout      = 10 * time.Second
 	workerPoolDeadTime = 10 * time.Second
 )

@@ -50,43 +46,21 @@ type GoCraftWorkPool struct {
 	knownJobs map[string]interface{}
 }

-//RedisPoolConfig defines configurations for GoCraftWorkPool.
-type RedisPoolConfig struct {
-	RedisHost   string
-	RedisPort   uint
-	Namespace   string
-	WorkerCount uint
-}
-
 //RedisPoolContext ...
 //We did not use this context to pass context info so far, just a placeholder.
 type RedisPoolContext struct{}

 //NewGoCraftWorkPool is constructor of goCraftWorkPool.
-func NewGoCraftWorkPool(ctx *env.Context, cfg RedisPoolConfig) *GoCraftWorkPool {
-	redisPool := &redis.Pool{
-		MaxActive: 6,
-		MaxIdle:   6,
-		Wait:      true,
-		Dial: func() (redis.Conn, error) {
-			return redis.Dial(
-				"tcp",
-				fmt.Sprintf("%s:%d", cfg.RedisHost, cfg.RedisPort),
-				redis.DialConnectTimeout(dialConnectionTimeout),
-				redis.DialReadTimeout(dialReadTimeout),
-				redis.DialWriteTimeout(dialWriteTimeout),
-			)
-		},
-	}
-	pool := work.NewWorkerPool(RedisPoolContext{}, cfg.WorkerCount, cfg.Namespace, redisPool)
-	enqueuer := work.NewEnqueuer(cfg.Namespace, redisPool)
-	client := work.NewClient(cfg.Namespace, redisPool)
-	scheduler := period.NewRedisPeriodicScheduler(ctx, cfg.Namespace, redisPool)
-	sweeper := period.NewSweeper(cfg.Namespace, redisPool, client)
-	statsMgr := opm.NewRedisJobStatsManager(ctx.SystemContext, cfg.Namespace, redisPool, client, scheduler)
-	msgServer := NewMessageServer(ctx.SystemContext, cfg.Namespace, redisPool)
+func NewGoCraftWorkPool(ctx *env.Context, namespace string, workerCount uint, redisPool *redis.Pool) *GoCraftWorkPool {
+	pool := work.NewWorkerPool(RedisPoolContext{}, workerCount, namespace, redisPool)
+	enqueuer := work.NewEnqueuer(namespace, redisPool)
+	client := work.NewClient(namespace, redisPool)
+	scheduler := period.NewRedisPeriodicScheduler(ctx, namespace, redisPool)
+	sweeper := period.NewSweeper(namespace, redisPool, client)
+	statsMgr := opm.NewRedisJobStatsManager(ctx.SystemContext, namespace, redisPool, client, scheduler)
+	msgServer := NewMessageServer(ctx.SystemContext, namespace, redisPool)
 	return &GoCraftWorkPool{
-		namespace: cfg.Namespace,
+		namespace: namespace,
 		redisPool: redisPool,
 		pool:      pool,
 		enqueuer:  enqueuer,
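The constructor hunk above inverts the dependency: NewGoCraftWorkPool no longer builds its own redis.Pool from a RedisPoolConfig but accepts the namespace, worker count, and an already-constructed *redis.Pool, which it then shares with the worker pool, enqueuer, client, scheduler, sweeper, stats manager, and message server. A rough caller sketch under the new signature (the address, namespace, and worker count are placeholders; the commit's real wiring is in the runtime bootstrap hunks further down):

	package example // hypothetical, for illustration only

	import (
		"github.com/garyburd/redigo/redis"

		"github.com/vmware/harbor/src/jobservice_v2/env"
		"github.com/vmware/harbor/src/jobservice_v2/pool"
	)

	func newWorkerPool(ctx *env.Context) *pool.GoCraftWorkPool {
		// The caller now owns the Redis pool and can reuse it elsewhere.
		redisPool := &redis.Pool{
			MaxActive: 6,
			MaxIdle:   6,
			Wait:      true,
			Dial: func() (redis.Conn, error) {
				return redis.Dial("tcp", "localhost:6379") // placeholder address
			},
		}
		return pool.NewGoCraftWorkPool(ctx, "placeholder_ns", 10, redisPool)
	}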
@@ -170,20 +144,20 @@ func (gcwp *GoCraftWorkPool) Start() {
 	go func() {
 		defer func() {
 			gcwp.context.WG.Done()
-			log.Infof("Redis worker pool is stopped")
+			logger.Infof("Redis worker pool is stopped")
 		}()

 		//Clear dirty data before pool starting
 		if err := gcwp.sweeper.ClearOutdatedScheduledJobs(); err != nil {
 			//Only logged
-			log.Errorf("Clear outdated data before pool starting failed with error:%s\n", err)
+			logger.Errorf("Clear outdated data before pool starting failed with error:%s\n", err)
 		}

 		//Append middlewares
 		gcwp.pool.Middleware((*RedisPoolContext).logJob)

 		gcwp.pool.Start()
-		log.Infof("Redis worker pool is started")
+		logger.Infof("Redis worker pool is started")

 		//Block on listening context and done signal
 		select {
@@ -467,7 +441,7 @@ func (gcwp *GoCraftWorkPool) handleRegisterStatusHook(data interface{}) error {

 //log the job
 func (rpc *RedisPoolContext) logJob(job *work.Job, next work.NextMiddlewareFunc) error {
-	log.Infof("Job incoming: %s:%s", job.Name, job.ID)
+	logger.Infof("Job incoming: %s:%s", job.Name, job.ID)
 	return next()
 }

@@ -4,14 +4,15 @@ package runtime

 import (
 	"context"
+	"fmt"
 	"os"
 	"os/signal"
 	"sync"
 	"syscall"
 	"time"

+	"github.com/garyburd/redigo/redis"
 	"github.com/vmware/harbor/src/common/job"
-	"github.com/vmware/harbor/src/common/utils/log"
 	"github.com/vmware/harbor/src/jobservice_v2/api"
 	"github.com/vmware/harbor/src/jobservice_v2/config"
 	"github.com/vmware/harbor/src/jobservice_v2/core"
@@ -23,6 +24,13 @@ import (
 	"github.com/vmware/harbor/src/jobservice_v2/pool"
 )

+const (
+	dialConnectionTimeout = 30 * time.Second
+	healthCheckPeriod     = time.Minute
+	dialReadTimeout       = healthCheckPeriod + 10*time.Second
+	dialWriteTimeout      = 10 * time.Second
+)
+
 //JobService ...
 var JobService = &Bootstrap{}

@@ -40,13 +48,7 @@ func (bs *Bootstrap) SetJobContextInitializer(initializer env.JobContextInitiali

 //LoadAndRun will load configurations, initialize components and then start the related process to serve requests.
 //Return error if meet any problems.
-func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {
-	//Load configurations
-	if err := config.DefaultConfig.Load(configFile, detectEnv); err != nil {
-		log.Errorf("Failed to load configurations with error: %s\n", err)
-		return
-	}
-
+func (bs *Bootstrap) LoadAndRun() {
 	//Create the root context
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
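Because the hunk above strips configuration loading out of LoadAndRun, the entry point presumably loads the configuration itself (via config.DefaultConfig.Load, the call removed here) before invoking the bootstrap. A hypothetical sketch of such a caller — the flag name, default path, and import paths are inferred, not taken from this commit:

	package main

	import (
		"flag"

		"github.com/vmware/harbor/src/jobservice_v2/config"
		"github.com/vmware/harbor/src/jobservice_v2/logger"
		"github.com/vmware/harbor/src/jobservice_v2/runtime"
	)

	func main() {
		configPath := flag.String("c", "config.yml", "path of the configuration file (placeholder default)")
		flag.Parse()

		// Load configuration up front, since LoadAndRun() no longer does it.
		if err := config.DefaultConfig.Load(*configPath, true); err != nil {
			logger.Fatalf("failed to load configurations: %s\n", err)
		}

		runtime.JobService.LoadAndRun()
	}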
@@ -62,7 +64,7 @@ func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {
 		if jobCtx, err := bs.jobConextInitializer(rootContext); err == nil {
 			rootContext.JobContext = jobCtx
 		} else {
-			log.Fatalf("Failed to initialize job context: %s\n", err)
+			logger.Fatalf("Failed to initialize job context: %s\n", err)
 		}
 	}

@@ -77,7 +79,7 @@ func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {

 	//Start the API server
 	apiServer := bs.loadAndRunAPIServer(rootContext, config.DefaultConfig, ctl)
-	log.Infof("Server is started at %s:%d with %s", "", config.DefaultConfig.Port, config.DefaultConfig.Protocol)
+	logger.Infof("Server is started at %s:%d with %s", "", config.DefaultConfig.Port, config.DefaultConfig.Protocol)

 	//Start outdated log files sweeper
 	logSweeper := logger.NewSweeper(ctx, config.GetLogBasePath(), config.GetLogArchivePeriod())
@@ -89,7 +91,7 @@ func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {
 	select {
 	case <-sig:
 	case err := <-rootContext.ErrorChan:
-		log.Errorf("Server error:%s\n", err)
+		logger.Errorf("Server error:%s\n", err)
 	}

 	//Call cancel to send termination signal to other interested parts.
@@ -117,7 +119,7 @@ func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {
 	rootContext.WG.Wait()
 	close <- true

-	log.Infof("Server gracefully exit")
+	logger.Infof("Server gracefully exit")
 }

 //Load and run the API server.
@@ -143,14 +145,25 @@ func (bs *Bootstrap) loadAndRunAPIServer(ctx *env.Context, cfg *config.Configura

 //Load and run the worker pool
 func (bs *Bootstrap) loadAndRunRedisWorkerPool(ctx *env.Context, cfg *config.Configuration) pool.Interface {
-	redisPoolCfg := pool.RedisPoolConfig{
-		RedisHost:   cfg.PoolConfig.RedisPoolCfg.Host,
-		RedisPort:   cfg.PoolConfig.RedisPoolCfg.Port,
-		Namespace:   cfg.PoolConfig.RedisPoolCfg.Namespace,
-		WorkerCount: cfg.PoolConfig.WorkerCount,
+	redisPool := &redis.Pool{
+		MaxActive: 6,
+		MaxIdle:   6,
+		Wait:      true,
+		Dial: func() (redis.Conn, error) {
+			return redis.Dial(
+				"tcp",
+				fmt.Sprintf("%s:%d", cfg.PoolConfig.RedisPoolCfg.Host, cfg.PoolConfig.RedisPoolCfg.Port),
+				redis.DialConnectTimeout(dialConnectionTimeout),
+				redis.DialReadTimeout(dialReadTimeout),
+				redis.DialWriteTimeout(dialWriteTimeout),
+			)
+		},
 	}

-	redisWorkerPool := pool.NewGoCraftWorkPool(ctx, redisPoolCfg)
+	redisWorkerPool := pool.NewGoCraftWorkPool(ctx,
+		cfg.PoolConfig.RedisPoolCfg.Namespace,
+		cfg.PoolConfig.WorkerCount,
+		redisPool)
 	//Register jobs here
 	if err := redisWorkerPool.RegisterJob(impl.KnownJobReplication, (*impl.ReplicationJob)(nil)); err != nil {
 		//exit
|
@ -160,8 +173,9 @@ func (bs *Bootstrap) loadAndRunRedisWorkerPool(ctx *env.Context, cfg *config.Con
|
||||||
if err := redisWorkerPool.RegisterJobs(
|
if err := redisWorkerPool.RegisterJobs(
|
||||||
map[string]interface{}{
|
map[string]interface{}{
|
||||||
job.ImageScanJob: (*scan.ClairJob)(nil),
|
job.ImageScanJob: (*scan.ClairJob)(nil),
|
||||||
job.ImageReplicationTransfer: (*replication.Replicator)(nil),
|
job.ImageTransfer: (*replication.Transfer)(nil),
|
||||||
job.ImageReplicationDelete: (*replication.Deleter)(nil),
|
job.ImageDelete: (*replication.Deleter)(nil),
|
||||||
|
job.ImageReplicate: (*replication.Replicator)(nil),
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
//exit
|
//exit
|
||||||
ctx.ErrorChan <- err
|
ctx.ErrorChan <- err
|
||||||
|
|
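The names registered above (job.ImageTransfer, job.ImageDelete, job.ImageReplicate, alongside job.ImageScanJob) are the gocraft/work job names the pool dispatches on. A rough sketch of how a producer sharing the same namespace and Redis pool could enqueue one of them — the namespace string and argument keys are placeholders, and Harbor components would normally submit jobs through the job service API rather than enqueueing directly:

	package example // hypothetical, for illustration only

	import (
		"github.com/garyburd/redigo/redis"
		"github.com/gocraft/work"

		"github.com/vmware/harbor/src/common/job"
		"github.com/vmware/harbor/src/jobservice_v2/logger"
	)

	func enqueueTransfer(redisPool *redis.Pool) {
		// Same namespace as the worker pool; the value here is a placeholder.
		enqueuer := work.NewEnqueuer("placeholder_ns", redisPool)
		_, err := enqueuer.Enqueue(job.ImageTransfer, work.Q{
			"image":    "library/hello-world:latest", // placeholder argument
			"endpoint": "https://target.example.com", // placeholder argument
		})
		if err != nil {
			logger.Errorf("failed to enqueue %s: %s\n", job.ImageTransfer, err)
		}
	}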
@@ -41,7 +41,7 @@ func (r *ReplicationAPI) Prepare() {
 		return
 	}

-	if !r.SecurityCtx.IsSysAdmin() {
+	if !r.SecurityCtx.IsSysAdmin() && !r.SecurityCtx.IsSolutionUser() {
 		r.HandleForbidden(r.SecurityCtx.GetUsername())
 		return
 	}
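The final hunk widens the replication API guard: besides system administrators, a solution user (presumably Harbor's internal service identity, such as the job service calling back into the UI) is now allowed through Prepare. The same check restated positively, as a hedged sketch — the helper name is hypothetical and only the SecurityCtx methods visible in the hunk are assumed:

	// authorized reports whether the current security context may use the replication API.
	func (r *ReplicationAPI) authorized() bool { // hypothetical helper, not in the commit
		if r.SecurityCtx.IsSysAdmin() || r.SecurityCtx.IsSolutionUser() {
			return true
		}
		r.HandleForbidden(r.SecurityCtx.GetUsername())
		return false
	}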