fix: remove old artifact model (#11112)

Signed-off-by: He Weiwei <hweiwei@vmware.com>
This commit is contained in:
He Weiwei 2020-03-18 14:20:06 +08:00 committed by GitHub
parent 2f7ce0da1c
commit 7d20154db5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 1 addition and 1807 deletions

View File

@ -13,7 +13,7 @@ table artifact:
pull_time timestamp, pull_time timestamp,
extra_attrs text, extra_attrs text,
annotations jsonb, annotations jsonb,
CONSTRAINT unique_artifact_2 UNIQUE (repository_id, digest) CONSTRAINT unique_artifact UNIQUE (repository_id, digest)
*/ */
ALTER TABLE admin_job ADD COLUMN job_parameters varchar(255) Default ''; ALTER TABLE admin_job ADD COLUMN job_parameters varchar(255) Default '';
@ -163,28 +163,6 @@ SELECT label.label_id, repo_tag.artifact_id, label.creation_time, label.update_t
/*remove the records for images in table 'harbor_resource_label'*/ /*remove the records for images in table 'harbor_resource_label'*/
DELETE FROM harbor_resource_label WHERE resource_type = 'i'; DELETE FROM harbor_resource_label WHERE resource_type = 'i';
/* TODO remove this table after clean up code that related with the old artifact model */
CREATE TABLE artifact_2
(
id SERIAL PRIMARY KEY NOT NULL,
project_id int NOT NULL,
repo varchar(255) NOT NULL,
tag varchar(255) NOT NULL,
/*
digest of manifest
*/
digest varchar(255) NOT NULL,
/*
kind of artifact, image, chart, etc..
*/
kind varchar(255) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
pull_time timestamp,
push_time timestamp,
CONSTRAINT unique_artifact_2 UNIQUE (project_id, repo, tag)
);
CREATE TABLE audit_log CREATE TABLE audit_log
( (
id SERIAL PRIMARY KEY NOT NULL, id SERIAL PRIMARY KEY NOT NULL,

View File

@ -1,142 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"strings"
"time"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/models"
)
// AddArtifact inserts the artifact record, stamping its creation and push
// time with the current clock. A unique-constraint violation is reported as
// ErrDupRows.
func AddArtifact(af *models.Artifact) (int64, error) {
	ts := time.Now()
	af.CreationTime = ts
	af.PushTime = ts
	newID, insErr := GetOrmer().Insert(af)
	if insErr == nil {
		return newID, nil
	}
	if strings.Contains(insErr.Error(), "duplicate key value violates unique constraint") {
		return 0, ErrDupRows
	}
	return 0, insErr
}
// UpdateArtifact persists every column of the given artifact row.
func UpdateArtifact(af *models.Artifact) error {
	_, updateErr := GetOrmer().Update(af)
	return updateErr
}
// UpdateArtifactDigest persists only the digest column of the artifact.
func UpdateArtifactDigest(af *models.Artifact) error {
	_, updateErr := GetOrmer().Update(af, "digest")
	return updateErr
}
// UpdateArtifactPullTime updates the pull time of the artifact.
func UpdateArtifactPullTime(af *models.Artifact) error {
	_, updateErr := GetOrmer().Update(af, "pull_time")
	return updateErr
}
// DeleteArtifact removes the artifact row with the given primary key.
func DeleteArtifact(id int64) error {
	_, delErr := GetOrmer().QueryTable(&models.Artifact{}).Filter("ID", id).Delete()
	return delErr
}
// DeleteArtifactByDigest removes every artifact row matching the given
// project, repository and manifest digest.
func DeleteArtifactByDigest(projectID int64, repo, digest string) error {
	_, execErr := GetOrmer().Raw(`delete from artifact_2 where project_id = ? and repo = ? and digest = ? `,
		projectID, repo, digest).Exec()
	return execErr
}
// DeleteArtifactByTag removes every artifact row matching the given project,
// repository and tag.
func DeleteArtifactByTag(projectID int64, repo, tag string) error {
	_, execErr := GetOrmer().Raw(`delete from artifact_2 where project_id = ? and repo = ? and tag = ? `,
		projectID, repo, tag).Exec()
	return execErr
}
// ListArtifacts returns the artifacts matching the query conditions,
// honoring the optional Size/Page paging fields.
func ListArtifacts(query *models.ArtifactQuery) ([]*models.Artifact, error) {
	seter := getArtifactQuerySetter(query)
	if query.Size > 0 {
		seter = seter.Limit(query.Size)
		if query.Page > 0 {
			seter = seter.Offset((query.Page - 1) * query.Size)
		}
	}
	result := []*models.Artifact{}
	_, err := seter.All(&result)
	return result, err
}
// GetArtifact looks up a single artifact by repository and tag. It returns
// (nil, nil) when no row matches.
func GetArtifact(repo, tag string) (*models.Artifact, error) {
	result := &models.Artifact{}
	queryErr := GetOrmer().QueryTable(&models.Artifact{}).
		Filter("Repo", repo).
		Filter("Tag", tag).One(result)
	if queryErr == orm.ErrNoRows {
		return nil, nil
	}
	if queryErr != nil {
		return nil, queryErr
	}
	return result, nil
}
// GetTotalOfArtifacts returns the number of artifacts, optionally restricted
// by the first query argument when one is supplied.
func GetTotalOfArtifacts(query ...*models.ArtifactQuery) (int64, error) {
	if len(query) == 0 {
		return GetOrmer().QueryTable(&models.Artifact{}).Count()
	}
	return getArtifactQuerySetter(query[0]).Count()
}
// getArtifactQuerySetter builds an ORM query seter from the non-zero fields
// of the given query (PID, Repo, Tag, Digest).
func getArtifactQuerySetter(query *models.ArtifactQuery) orm.QuerySeter {
	seter := GetOrmer().QueryTable(&models.Artifact{})
	if query.PID != 0 {
		seter = seter.Filter("PID", query.PID)
	}
	if query.Repo != "" {
		seter = seter.Filter("Repo", query.Repo)
	}
	if query.Tag != "" {
		seter = seter.Filter("Tag", query.Tag)
	}
	if query.Digest != "" {
		seter = seter.Filter("Digest", query.Digest)
	}
	return seter
}

View File

@ -1,110 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"fmt"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/pkg/errors"
"strconv"
"strings"
"time"
)
// AddArtifactNBlob inserts a single artifact/blob relation, stamping its
// creation time. A unique-constraint violation is reported as ErrDupRows.
func AddArtifactNBlob(afnb *models.ArtifactAndBlob) (int64, error) {
	afnb.CreationTime = time.Now()
	newID, insErr := GetOrmer().Insert(afnb)
	if insErr == nil {
		return newID, nil
	}
	if strings.Contains(insErr.Error(), "duplicate key value violates unique constraint") {
		return 0, ErrDupRows
	}
	return 0, insErr
}
// AddArtifactNBlobs inserts the artifact/blob relations in one transaction.
// The whole batch is rolled back when any row fails to insert or when fewer
// rows than expected were inserted; a duplicate-key failure is wrapped with
// ErrDupRows's message so callers can recognize it.
func AddArtifactNBlobs(afnbs []*models.ArtifactAndBlob) error {
	o := orm.NewOrm()
	if err := o.Begin(); err != nil {
		return err
	}
	total := len(afnbs)
	successNums, err := o.InsertMulti(total, afnbs)
	if err != nil {
		if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
			err = errors.Wrap(err, ErrDupRows.Error())
		}
		return rollbackInsertArtifactNBlobs(o, err)
	}
	// part of them cannot be inserted successfully.
	if successNums != int64(total) {
		return rollbackInsertArtifactNBlobs(o, errors.New("Not all of artifact and blobs are inserted successfully"))
	}
	if err := o.Commit(); err != nil {
		log.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err)
		return fmt.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err)
	}
	return nil
}

// rollbackInsertArtifactNBlobs rolls back the transaction and, when the
// rollback itself fails, chains that failure onto the original cause.
func rollbackInsertArtifactNBlobs(o orm.Ormer, cause error) error {
	if err := o.Rollback(); err != nil {
		log.Errorf("fail to rollback when to insert multiple artifact and blobs, %v", err)
		cause = errors.Wrap(cause, err.Error())
	}
	return cause
}
// DeleteArtifactAndBlobByDigest removes every artifact/blob relation whose
// artifact digest matches.
func DeleteArtifactAndBlobByDigest(digest string) error {
	_, execErr := GetOrmer().Raw(`delete from artifact_blob where digest_af = ? `, digest).Exec()
	return execErr
}
// CountSizeOfArtifact sums the sizes of all blobs linked to the artifact
// with the given digest. It returns -1 with a nil error when the query
// produces no aggregate row, matching the historical contract.
func CountSizeOfArtifact(digest string) (int64, error) {
	var res []orm.Params
	num, err := GetOrmer().Raw(`SELECT sum(bb.size) FROM artifact_blob afnb LEFT JOIN blob bb ON afnb.digest_blob = bb.digest WHERE afnb.digest_af = ? `, digest).Values(&res)
	if err != nil {
		return -1, err
	}
	if num > 0 {
		// BUG FIX: SUM over zero matching rows yields SQL NULL, which the
		// driver delivers as a nil interface; the unchecked string assertion
		// used to panic here. Treat NULL as size 0.
		raw, ok := res[0]["sum"].(string)
		if !ok {
			return 0, nil
		}
		size, err := strconv.ParseInt(raw, 0, 64)
		if err != nil {
			return -1, err
		}
		return size, nil
	}
	return -1, err
}

View File

@ -1,127 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"testing"
"github.com/goharbor/harbor/src/common/models"
"github.com/stretchr/testify/require"
)
// NOTE(review): these tests hit a shared database and rely on running in
// file order with digests unique across the file — confirm the harness
// preserves that ordering.

// TestAddArtifactNBlob verifies that a single artifact/blob relation can be
// inserted without error.
func TestAddArtifactNBlob(t *testing.T) {
	afnb := &models.ArtifactAndBlob{
		DigestAF:   "vvvv",
		DigestBlob: "aaaa",
	}
	// add
	_, err := AddArtifactNBlob(afnb)
	require.Nil(t, err)
}

// TestAddArtifactNBlobs verifies batch insertion of three relations sharing
// one artifact digest.
func TestAddArtifactNBlobs(t *testing.T) {
	afnb1 := &models.ArtifactAndBlob{
		DigestAF:   "zzzz",
		DigestBlob: "zzza",
	}
	afnb2 := &models.ArtifactAndBlob{
		DigestAF:   "zzzz",
		DigestBlob: "zzzb",
	}
	afnb3 := &models.ArtifactAndBlob{
		DigestAF:   "zzzz",
		DigestBlob: "zzzc",
	}
	var afnbs []*models.ArtifactAndBlob
	afnbs = append(afnbs, afnb1)
	afnbs = append(afnbs, afnb2)
	afnbs = append(afnbs, afnb3)
	// add
	err := AddArtifactNBlobs(afnbs)
	require.Nil(t, err)
}

// TestDeleteArtifactAndBlobByDigest inserts a relation and deletes it again
// by its artifact digest.
func TestDeleteArtifactAndBlobByDigest(t *testing.T) {
	afnb := &models.ArtifactAndBlob{
		DigestAF:   "vvvv",
		DigestBlob: "vvva",
	}
	// add
	_, err := AddArtifactNBlob(afnb)
	require.Nil(t, err)
	// delete
	err = DeleteArtifactAndBlobByDigest(afnb.DigestAF)
	require.Nil(t, err)
}

// TestCountSizeOfArtifact links three blobs of sizes 100, 200 and 300 to one
// artifact digest and expects the summed size 600.
func TestCountSizeOfArtifact(t *testing.T) {
	afnb1 := &models.ArtifactAndBlob{
		DigestAF:   "xxxx",
		DigestBlob: "aaaa",
	}
	afnb2 := &models.ArtifactAndBlob{
		DigestAF:   "xxxx",
		DigestBlob: "aaab",
	}
	afnb3 := &models.ArtifactAndBlob{
		DigestAF:   "xxxx",
		DigestBlob: "aaac",
	}
	var afnbs []*models.ArtifactAndBlob
	afnbs = append(afnbs, afnb1)
	afnbs = append(afnbs, afnb2)
	afnbs = append(afnbs, afnb3)
	err := AddArtifactNBlobs(afnbs)
	require.Nil(t, err)
	blob1 := &models.Blob{
		Digest:      "aaaa",
		ContentType: "v2.blob",
		Size:        100,
	}
	_, err = AddBlob(blob1)
	require.Nil(t, err)
	blob2 := &models.Blob{
		Digest:      "aaab",
		ContentType: "v2.blob",
		Size:        200,
	}
	_, err = AddBlob(blob2)
	require.Nil(t, err)
	blob3 := &models.Blob{
		Digest:      "aaac",
		ContentType: "v2.blob",
		Size:        300,
	}
	_, err = AddBlob(blob3)
	require.Nil(t, err)
	imageSize, err := CountSizeOfArtifact("xxxx")
	require.Nil(t, err)
	require.Equal(t, imageSize, int64(600))
}

View File

@ -1,184 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"testing"
"time"
"github.com/goharbor/harbor/src/common/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// NOTE(review): these tests share one database and depend on running in file
// order (see the hard-coded ID assertion below) — confirm the harness keeps
// that ordering.

// TestAddArtifact inserts an artifact and expects it to receive ID 1.
func TestAddArtifact(t *testing.T) {
	af := &models.Artifact{
		PID:    1,
		Repo:   "hello-world",
		Tag:    "latest",
		Digest: "1234abcd",
		Kind:   "image",
	}
	// add
	id, err := AddArtifact(af)
	require.Nil(t, err)
	af.ID = id
	// assumes a freshly-prepared table so the first inserted row gets ID 1
	assert.Equal(t, id, int64(1))
}

// TestGetArtifact reads back the artifact inserted by TestAddArtifact.
func TestGetArtifact(t *testing.T) {
	repo := "hello-world"
	tag := "latest"
	artifact, err := GetArtifact(repo, tag)
	require.Nil(t, err)
	require.NotNil(t, artifact)
	assert.Equal(t, repo, artifact.Repo)
	assert.Equal(t, tag, artifact.Tag)
}

// TestUpdateArtifactDigest updates only the digest column of an artifact.
func TestUpdateArtifactDigest(t *testing.T) {
	af := &models.Artifact{
		PID:    1,
		Repo:   "hello-world",
		Tag:    "v2.0",
		Digest: "4321abcd",
		Kind:   "image",
	}
	// add
	_, err := AddArtifact(af)
	require.Nil(t, err)
	af.Digest = "update_4321abcd"
	require.Nil(t, UpdateArtifactDigest(af))
	assert.Equal(t, af.Digest, "update_4321abcd")
}

// TestUpdateArtifactPullTime updates only the pull_time column; the one
// second sleep guarantees the new timestamp differs from the original.
func TestUpdateArtifactPullTime(t *testing.T) {
	timeNow := time.Now()
	af := &models.Artifact{
		PID:      1,
		Repo:     "TestUpdateArtifactPullTime",
		Tag:      "v1.0",
		Digest:   "4321abcd",
		Kind:     "image",
		PullTime: timeNow,
	}
	// add
	_, err := AddArtifact(af)
	require.Nil(t, err)
	time.Sleep(time.Second * 1)
	af.PullTime = time.Now()
	require.Nil(t, UpdateArtifactPullTime(af))
	assert.NotEqual(t, timeNow, af.PullTime)
}

// TestDeleteArtifact deletes an artifact by its primary key.
func TestDeleteArtifact(t *testing.T) {
	af := &models.Artifact{
		PID:    1,
		Repo:   "hello-world",
		Tag:    "v1.0",
		Digest: "1234abcd",
		Kind:   "image",
	}
	// add
	id, err := AddArtifact(af)
	require.Nil(t, err)
	// delete
	err = DeleteArtifact(id)
	require.Nil(t, err)
}

// TestDeleteArtifactByDigest deletes an artifact by (project, repo, digest).
func TestDeleteArtifactByDigest(t *testing.T) {
	af := &models.Artifact{
		PID:    1,
		Repo:   "hello-world",
		Tag:    "v1.1",
		Digest: "TestDeleteArtifactByDigest",
		Kind:   "image",
	}
	// add
	_, err := AddArtifact(af)
	require.Nil(t, err)
	// delete
	err = DeleteArtifactByDigest(af.PID, af.Repo, af.Digest)
	require.Nil(t, err)
}

// TestDeleteArtifactByTag deletes an artifact by (project, repo, tag).
func TestDeleteArtifactByTag(t *testing.T) {
	af := &models.Artifact{
		PID:    1,
		Repo:   "hello-world",
		Tag:    "v1.2",
		Digest: "TestDeleteArtifactByTag",
		Kind:   "image",
	}
	// add
	_, err := AddArtifact(af)
	require.Nil(t, err)
	// delete
	err = DeleteArtifactByTag(1, "hello-world", "v1.2")
	require.Nil(t, err)
}

// TestListArtifacts expects exactly one artifact to match a fully-specified
// query.
func TestListArtifacts(t *testing.T) {
	af := &models.Artifact{
		PID:    1,
		Repo:   "hello-world",
		Tag:    "v3.0",
		Digest: "TestListArtifacts",
		Kind:   "image",
	}
	// add
	_, err := AddArtifact(af)
	require.Nil(t, err)
	afs, err := ListArtifacts(&models.ArtifactQuery{
		PID:  1,
		Repo: "hello-world",
		Tag:  "v3.0",
	})
	require.Nil(t, err)
	assert.Equal(t, 1, len(afs))
}

// TestGetTotalOfArtifacts expects the count for a fully-specified query to
// be exactly one.
func TestGetTotalOfArtifacts(t *testing.T) {
	af := &models.Artifact{
		PID:    2,
		Repo:   "hello-world",
		Tag:    "v3.0",
		Digest: "TestGetTotalOfArtifacts",
		Kind:   "image",
	}
	// add
	_, err := AddArtifact(af)
	require.Nil(t, err)
	total, err := GetTotalOfArtifacts(&models.ArtifactQuery{
		PID:  2,
		Repo: "hello-world",
		Tag:  "v3.0",
	})
	require.Nil(t, err)
	assert.Equal(t, int64(1), total)
}

View File

@ -1,232 +0,0 @@
package dao
import (
"fmt"
"strings"
"time"
"github.com/docker/distribution"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
)
// AddBlob inserts the blob record, stamping its creation time. A
// unique-constraint violation is reported as ErrDupRows.
func AddBlob(blob *models.Blob) (int64, error) {
	blob.CreationTime = time.Now()
	newID, insErr := GetOrmer().Insert(blob)
	if insErr == nil {
		return newID, nil
	}
	if strings.Contains(insErr.Error(), "duplicate key value violates unique constraint") {
		return 0, ErrDupRows
	}
	return 0, insErr
}
// GetOrCreateBlob returns blob by digest, create it if not exists
func GetOrCreateBlob(blob *models.Blob) (bool, *models.Blob, error) {
	blob.CreationTime = time.Now()
	created, rowID, err := GetOrmer().ReadOrCreate(blob, "digest")
	if err != nil {
		return false, nil, err
	}
	blob.ID = rowID
	return created, blob, nil
}
// GetBlob fetches a single blob row by digest. A missing digest yields an
// empty Blob with a nil error; more than one match is reported as an error.
func GetBlob(digest string) (*models.Blob, error) {
	matches := []*models.Blob{}
	seter := GetOrmer().QueryTable(&models.Blob{}).Filter("Digest", digest)
	if _, err := seter.All(&matches); err != nil {
		return nil, fmt.Errorf("failed to get blob for digest %s, error: %v", digest, err)
	}
	switch {
	case len(matches) == 0:
		log.Infof("No blob found for digest %s, returning empty.", digest)
		return &models.Blob{}, nil
	case len(matches) > 1:
		log.Infof("Multiple blob found for digest %s", digest)
		return &models.Blob{}, fmt.Errorf("Multiple blob found for digest %s", digest)
	}
	return matches[0], nil
}
// DeleteBlob removes the blob row with the given digest.
func DeleteBlob(digest string) error {
	_, delErr := GetOrmer().QueryTable("blob").Filter("digest", digest).Delete()
	return delErr
}
// ListBlobs returns the blobs matching the optional query conditions,
// honoring the Size/Page paging fields.
func ListBlobs(query *models.BlobQuery) ([]*models.Blob, error) {
	seter := GetOrmer().QueryTable(&models.Blob{})
	if query != nil {
		if query.Digest != "" {
			seter = seter.Filter("Digest", query.Digest)
		}
		if query.ContentType != "" {
			seter = seter.Filter("ContentType", query.ContentType)
		}
		if len(query.Digests) > 0 {
			seter = seter.Filter("Digest__in", query.Digests)
		}
		if query.Size > 0 {
			seter = seter.Limit(query.Size)
			if query.Page > 0 {
				seter = seter.Offset((query.Page - 1) * query.Size)
			}
		}
	}
	result := []*models.Blob{}
	_, err := seter.All(&result)
	return result, err
}
// SyncBlobs reconciles the blob table with the given manifest references:
// blobs already present get their content_type refreshed when it differs,
// and missing ones are inserted in one batch.
func SyncBlobs(references []distribution.Descriptor) error {
	if len(references) == 0 {
		return nil
	}
	var digests []string
	for _, reference := range references {
		digests = append(digests, reference.Digest.String())
	}
	// Load the already-known blobs with a single query.
	existing, err := ListBlobs(&models.BlobQuery{Digests: digests})
	if err != nil {
		return err
	}
	mp := make(map[string]*models.Blob, len(existing))
	for _, blob := range existing {
		mp[blob.Digest] = blob
	}
	// Partition the references into rows to update and rows to insert.
	var missing, updating []*models.Blob
	for _, reference := range references {
		if blob, found := mp[reference.Digest.String()]; found {
			if blob.ContentType != reference.MediaType {
				blob.ContentType = reference.MediaType
				updating = append(updating, blob)
			}
		} else {
			missing = append(missing, &models.Blob{
				Digest:       reference.Digest.String(),
				ContentType:  reference.MediaType,
				Size:         reference.Size,
				CreationTime: time.Now(),
			})
		}
	}
	o := GetOrmer()
	if len(updating) > 0 {
		for _, blob := range updating {
			// Update failures are deliberately best-effort: log and continue.
			if _, err := o.Update(blob, "content_type"); err != nil {
				log.Warningf("Failed to update blob %s, error: %v", blob.Digest, err)
			}
		}
	}
	if len(missing) > 0 {
		_, err = o.InsertMulti(10, missing)
		if err != nil {
			if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
				return ErrDupRows
			}
		}
		return err
	}
	return nil
}
// GetBlobsByArtifact returns every blob referenced by the artifact digest.
func GetBlobsByArtifact(artifactDigest string) ([]*models.Blob, error) {
	var result []*models.Blob
	stmt := `SELECT * FROM blob WHERE digest IN (SELECT digest_blob FROM artifact_blob WHERE digest_af = ?)`
	if _, err := GetOrmer().Raw(stmt, artifactDigest).QueryRows(&result); err != nil {
		return nil, err
	}
	return result, nil
}
// GetExclusiveBlobs returns layers of repository:tag which are not shared with other repositories in the project
//
// Strategy: load all blobs referenced by the artifact digest, then query for
// the subset also referenced by some other artifact in the project (another
// repository, or the same repository under a different digest); every blob
// not in that shared subset is exclusive to this artifact.
func GetExclusiveBlobs(projectID int64, repository, digest string) ([]*models.Blob, error) {
	var exclusive []*models.Blob
	blobs, err := GetBlobsByArtifact(digest)
	if err != nil {
		return nil, err
	}
	if len(blobs) == 0 {
		return exclusive, nil
	}
	sql := fmt.Sprintf(`
SELECT
DISTINCT b.digest_blob AS digest
FROM
(
SELECT
digest
FROM
artifact_2
WHERE
(
project_id = ?
AND repo != ?
)
OR (
project_id = ?
AND digest != ?
)
) AS a
LEFT JOIN artifact_blob b ON a.digest = b.digest_af
AND b.digest_blob IN (%s)`, ParamPlaceholderForIn(len(blobs)))
	params := []interface{}{projectID, repository, projectID, digest}
	for _, blob := range blobs {
		params = append(params, blob.Digest)
	}
	var rows []struct {
		Digest string
	}
	if _, err := GetOrmer().Raw(sql, params...).QueryRows(&rows); err != nil {
		return nil, err
	}
	// Mark every blob digest that some other artifact also references.
	shared := map[string]bool{}
	for _, row := range rows {
		shared[row.Digest] = true
	}
	for _, blob := range blobs {
		if !shared[blob.Digest] {
			exclusive = append(exclusive, blob)
		}
	}
	return exclusive, nil
}

View File

@ -1,306 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"strings"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
// TestAddBlob inserts a blob row and expects no error.
func TestAddBlob(t *testing.T) {
	blob := &models.Blob{
		Digest:      "1234abcd",
		ContentType: "v2.blob",
		Size:        1523,
	}
	// add
	_, err := AddBlob(blob)
	require.Nil(t, err)
}

// TestGetBlob inserts a blob and reads it back by digest.
func TestGetBlob(t *testing.T) {
	blob := &models.Blob{
		Digest:      "12345abcde",
		ContentType: "v2.blob",
		Size:        453,
	}
	// add
	id, err := AddBlob(blob)
	require.Nil(t, err)
	blob.ID = id
	blob2, err := GetBlob("12345abcde")
	require.Nil(t, err)
	assert.Equal(t, blob.Digest, blob2.Digest)
}

// TestDeleteBlob inserts a blob and deletes it by digest.
func TestDeleteBlob(t *testing.T) {
	blob := &models.Blob{
		Digest:      "123456abcdef",
		ContentType: "v2.blob",
		Size:        4543,
	}
	id, err := AddBlob(blob)
	require.Nil(t, err)
	blob.ID = id
	err = DeleteBlob(blob.Digest)
	require.Nil(t, err)
}

// TestListBlobs inserts four random-digest blobs and checks filtering by a
// single digest, by content type, and by a digest list.
func TestListBlobs(t *testing.T) {
	assert := assert.New(t)
	d1 := digest.FromString(utils.GenerateRandomString())
	d2 := digest.FromString(utils.GenerateRandomString())
	d3 := digest.FromString(utils.GenerateRandomString())
	d4 := digest.FromString(utils.GenerateRandomString())
	for _, e := range []struct {
		Digest      digest.Digest
		ContentType string
		Size        int64
	}{
		{d1, schema2.MediaTypeLayer, 1},
		{d2, schema2.MediaTypeLayer, 2},
		{d3, schema2.MediaTypeForeignLayer, 3},
		{d4, schema2.MediaTypeForeignLayer, 4},
	} {
		blob := &models.Blob{
			Digest:      e.Digest.String(),
			ContentType: e.ContentType,
			Size:        e.Size,
		}
		_, err := AddBlob(blob)
		assert.Nil(err)
	}
	defer func() {
		// best-effort cleanup; errors are deliberately ignored
		for _, d := range []digest.Digest{d1, d2, d3, d4} {
			DeleteBlob(d.String())
		}
	}()
	blobs, err := ListBlobs(&models.BlobQuery{Digest: d1.String()})
	assert.Nil(err)
	assert.Len(blobs, 1)
	// NOTE(review): this count assumes no other foreign-layer blobs exist in
	// the shared database — verify fixture isolation.
	blobs, err = ListBlobs(&models.BlobQuery{ContentType: schema2.MediaTypeForeignLayer})
	assert.Nil(err)
	assert.Len(blobs, 2)
	blobs, err = ListBlobs(&models.BlobQuery{Digests: []string{d1.String(), d2.String(), d3.String()}})
	assert.Nil(err)
	assert.Len(blobs, 3)
}
// TestSyncBlobs pre-inserts one of four referenced blobs and verifies that
// syncing the references leaves all four present.
func TestSyncBlobs(t *testing.T) {
	assert := assert.New(t)
	d1 := digest.FromString(utils.GenerateRandomString())
	d2 := digest.FromString(utils.GenerateRandomString())
	d3 := digest.FromString(utils.GenerateRandomString())
	d4 := digest.FromString(utils.GenerateRandomString())
	blob := &models.Blob{
		Digest:      d1.String(),
		ContentType: schema2.MediaTypeLayer,
		Size:        1,
	}
	_, err := AddBlob(blob)
	assert.Nil(err)
	// an empty reference list must be a no-op
	assert.Nil(SyncBlobs([]distribution.Descriptor{}))
	references := []distribution.Descriptor{
		{MediaType: schema2.MediaTypeLayer, Digest: d1, Size: 1},
		{MediaType: schema2.MediaTypeForeignLayer, Digest: d2, Size: 2},
		{MediaType: schema2.MediaTypeForeignLayer, Digest: d3, Size: 3},
		{MediaType: schema2.MediaTypeForeignLayer, Digest: d4, Size: 4},
	}
	assert.Nil(SyncBlobs(references))
	defer func() {
		// best-effort cleanup; errors are deliberately ignored
		for _, d := range []digest.Digest{d1, d2, d3, d4} {
			DeleteBlob(d.String())
		}
	}()
	blobs, err := ListBlobs(&models.BlobQuery{Digests: []string{d1.String(), d2.String(), d3.String(), d4.String()}})
	assert.Nil(err)
	assert.Len(blobs, 4)
}

// prepareImage registers a fake image: an artifact whose digest is derived
// from its layer digests, one blob per layer plus the manifest blob itself,
// and the artifact/blob relations (inserted only for the first artifact
// carrying that digest). It returns the manifest digest.
func prepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) (string, error) {
	// NOTE(review): this local shadows the imported digest package from here on.
	digest := digest.FromString(strings.Join(layerDigests, ":")).String()
	artifact := &models.Artifact{PID: projectID, Repo: projectName + "/" + name, Digest: digest, Tag: tag}
	if _, err := AddArtifact(artifact); err != nil {
		return "", err
	}
	var afnbs []*models.ArtifactAndBlob
	blobDigests := append([]string{digest}, layerDigests...)
	for _, blobDigest := range blobDigests {
		blob := &models.Blob{Digest: blobDigest, Size: 1}
		if _, _, err := GetOrCreateBlob(blob); err != nil {
			return "", err
		}
		afnbs = append(afnbs, &models.ArtifactAndBlob{DigestAF: digest, DigestBlob: blobDigest})
	}
	total, err := GetTotalOfArtifacts(&models.ArtifactQuery{Digest: digest})
	if err != nil {
		return "", err
	}
	// Only the first artifact with this digest creates the relations.
	if total == 1 {
		if err := AddArtifactNBlobs(afnbs); err != nil {
			return "", err
		}
	}
	return digest, nil
}

// withProject runs f inside a throw-away project that is deleted afterwards.
func withProject(f func(int64, string)) {
	projectName := utils.GenerateRandomString()
	projectID, err := AddProject(models.Project{
		Name:    projectName,
		OwnerID: 1,
	})
	if err != nil {
		panic(err)
	}
	defer func() {
		DeleteProject(projectID)
	}()
	f(projectID, projectName)
}
// GetExclusiveBlobsSuite exercises GetExclusiveBlobs across tags,
// repositories and projects.
type GetExclusiveBlobsSuite struct {
	suite.Suite
}

// mustPrepareImage is prepareImage with the error asserted away.
func (suite *GetExclusiveBlobsSuite) mustPrepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) string {
	digest, err := prepareImage(projectID, projectName, name, tag, layerDigests...)
	suite.Nil(err)
	return digest
}

// TestInSameRepository checks exclusiveness when all tags live in one
// repository of one project.
func (suite *GetExclusiveBlobsSuite) TestInSameRepository() {
	withProject(func(projectID int64, projectName string) {
		digest1 := digest.FromString(utils.GenerateRandomString()).String()
		digest2 := digest.FromString(utils.GenerateRandomString()).String()
		digest3 := digest.FromString(utils.GenerateRandomString()).String()
		// manifest blob + two layers -> 3 exclusive blobs
		manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
			suite.Len(blobs, 3)
		}
		manifest2 := suite.mustPrepareImage(projectID, projectName, "mysql", "8.0", digest1, digest2)
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
			suite.Len(blobs, 3)
		}
		// a third tag sharing the layers leaves only each manifest blob exclusive
		manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
			suite.Len(blobs, 1)
			suite.Equal(manifest1, blobs[0].Digest)
		}
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
			suite.Len(blobs, 1)
			suite.Equal(manifest2, blobs[0].Digest)
		}
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
			suite.Len(blobs, 2)
		}
	})
}

// TestInDifferentRepositories checks that blobs shared across repositories
// in the same project are not exclusive.
func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() {
	withProject(func(projectID int64, projectName string) {
		digest1 := digest.FromString(utils.GenerateRandomString()).String()
		digest2 := digest.FromString(utils.GenerateRandomString()).String()
		digest3 := digest.FromString(utils.GenerateRandomString()).String()
		manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
			suite.Len(blobs, 3)
		}
		manifest2 := suite.mustPrepareImage(projectID, projectName, "mariadb", "latest", digest1, digest2)
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
			suite.Len(blobs, 0)
		}
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mariadb", manifest2); suite.Nil(err) {
			suite.Len(blobs, 0)
		}
		manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
			suite.Len(blobs, 0)
		}
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
			suite.Len(blobs, 0)
		}
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
			suite.Len(blobs, 2)
		}
	})
}

// TestInDifferentProjects checks that the same image in another project does
// not affect exclusiveness in this one.
func (suite *GetExclusiveBlobsSuite) TestInDifferentProjects() {
	withProject(func(projectID int64, projectName string) {
		digest1 := digest.FromString(utils.GenerateRandomString()).String()
		digest2 := digest.FromString(utils.GenerateRandomString()).String()
		manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
		if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
			suite.Len(blobs, 3)
		}
		withProject(func(id int64, name string) {
			manifest2 := suite.mustPrepareImage(id, name, "mysql", "latest", digest1, digest2)
			if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
				suite.Len(blobs, 3)
			}
			if blobs, err := GetExclusiveBlobs(id, name+"/mysql", manifest2); suite.Nil(err) {
				suite.Len(blobs, 3)
			}
		})
	})
}

// TestRunGetExclusiveBlobsSuite runs the suite above.
func TestRunGetExclusiveBlobsSuite(t *testing.T) {
	suite.Run(t, new(GetExclusiveBlobsSuite))
}

View File

@ -1,203 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"fmt"
"strings"
"time"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common"
)
// AddBlobToProject links the blob to the project, creating the relation row
// only when it does not already exist.
func AddBlobToProject(blobID, projectID int64) (int64, error) {
	relation := &models.ProjectBlob{
		BlobID:       blobID,
		ProjectID:    projectID,
		CreationTime: time.Now(),
	}
	_, id, err := GetOrmer().ReadOrCreate(relation, "blob_id", "project_id")
	return id, err
}
// AddBlobsToProject links the given blobs to the project in batches of 100.
// Note: pq has limitation on support parameters, the maximum length of blobs
// is 65535. A unique-constraint violation is reported as ErrDupRows.
func AddBlobsToProject(projectID int64, blobs ...*models.Blob) (int64, error) {
	if len(blobs) == 0 {
		return 0, nil
	}
	now := time.Now()
	projectBlobs := make([]*models.ProjectBlob, 0, len(blobs))
	for _, b := range blobs {
		projectBlobs = append(projectBlobs, &models.ProjectBlob{
			BlobID:       b.ID,
			ProjectID:    projectID,
			CreationTime: now,
		})
	}
	cnt, err := GetOrmer().InsertMulti(100, projectBlobs)
	if err == nil {
		return cnt, nil
	}
	if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
		return cnt, ErrDupRows
	}
	return cnt, err
}
// RemoveBlobsFromProject unlinks the given blobs from the project; a call
// with no blobs is a no-op.
func RemoveBlobsFromProject(projectID int64, blobs ...*models.Blob) error {
	blobIDs := make([]interface{}, 0, len(blobs))
	for _, b := range blobs {
		blobIDs = append(blobIDs, b.ID)
	}
	if len(blobIDs) == 0 {
		return nil
	}
	stmt := fmt.Sprintf(`DELETE FROM project_blob WHERE project_id = ? AND blob_id IN (%s)`, ParamPlaceholderForIn(len(blobIDs)))
	_, err := GetOrmer().Raw(stmt, projectID, blobIDs).Exec()
	return err
}
// HasBlobInProject reports whether the project owns a blob with the digest.
func HasBlobInProject(projectID int64, digest string) (bool, error) {
	var count int64
	stmt := `SELECT COUNT(*) FROM project_blob JOIN blob ON project_blob.blob_id = blob.id AND project_id = ? AND digest = ?`
	if err := GetOrmer().Raw(stmt, projectID, digest).QueryRow(&count); err != nil {
		return false, err
	}
	return count > 0, nil
}
// GetBlobsNotInProject returns, among the given digests, the blobs that are
// not yet linked to the project.
func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blob, error) {
	if len(blobDigests) == 0 {
		return nil, nil
	}
	stmt := fmt.Sprintf("SELECT * FROM blob WHERE id NOT IN (SELECT blob_id FROM project_blob WHERE project_id = ?) AND digest IN (%s)",
		ParamPlaceholderForIn(len(blobDigests)))
	args := make([]interface{}, 0, len(blobDigests)+1)
	args = append(args, projectID)
	for _, d := range blobDigests {
		args = append(args, d)
	}
	var result []*models.Blob
	if _, err := GetOrmer().Raw(stmt, args...).QueryRows(&result); err != nil {
		return nil, err
	}
	return result, nil
}
// CountSizeOfProject ...
// foreign blob won't be calculated
//
// Sums the sizes of all distinct blobs referenced by any artifact in the
// project, excluding blobs whose content type equals common.ForeignLayer.
func CountSizeOfProject(pid int64) (int64, error) {
	var blobs []models.Blob
	sql := `
SELECT
DISTINCT bb.digest,
bb.id,
bb.content_type,
bb.size,
bb.creation_time
FROM artifact_2 af
JOIN artifact_blob afnb
ON af.digest = afnb.digest_af
JOIN BLOB bb
ON afnb.digest_blob = bb.digest
WHERE af.project_id = ?
AND bb.content_type != ?
`
	_, err := GetOrmer().Raw(sql, pid, common.ForeignLayer).QueryRows(&blobs)
	if err != nil {
		return 0, err
	}
	var size int64
	for _, blob := range blobs {
		size += blob.Size
	}
	return size, err
}
// RemoveUntaggedBlobs deletes project_blob rows of the given project that no
// longer back any artifact in that project. When the project references no
// blobs at all, every project_blob row for the project is removed.
func RemoveUntaggedBlobs(pid int64) error {
	var blobs []models.Blob
	sql := `
SELECT
DISTINCT bb.digest,
bb.id,
bb.content_type,
bb.size,
bb.creation_time
FROM artifact_2 af
JOIN artifact_blob afnb
ON af.digest = afnb.digest_af
JOIN BLOB bb
ON afnb.digest_blob = bb.digest
WHERE af.project_id = ?
`
	_, err := GetOrmer().Raw(sql, pid).QueryRows(&blobs)
	// BUG FIX: this error was previously never checked; a failed query looked
	// like "no referenced blobs" and deleted every project_blob row.
	if err != nil {
		return err
	}
	if len(blobs) == 0 {
		// Nothing in the project references any blob: drop all relation rows.
		// (The needless fmt.Sprintf around the constant SQL was removed.)
		_, err = GetOrmer().Raw(`DELETE FROM project_blob WHERE project_id = ?`, pid).Exec()
		return err
	}
	var bbIDs []interface{}
	for _, bb := range blobs {
		bbIDs = append(bbIDs, bb.ID)
	}
	// Find relation rows whose blob is not among the still-referenced ones.
	var projectBlobs []*models.ProjectBlob
	sql = fmt.Sprintf(`SELECT * FROM project_blob AS pb WHERE project_id = ? AND pb.blob_id NOT IN (%s)`, ParamPlaceholderForIn(len(bbIDs)))
	if _, err = GetOrmer().Raw(sql, pid, bbIDs).QueryRows(&projectBlobs); err != nil {
		return err
	}
	var pbIDs []interface{}
	for _, pb := range projectBlobs {
		pbIDs = append(pbIDs, pb.ID)
	}
	if len(pbIDs) == 0 {
		return nil
	}
	sql = fmt.Sprintf(`DELETE FROM project_blob WHERE id IN (%s)`, ParamPlaceholderForIn(len(pbIDs)))
	_, err = GetOrmer().Raw(sql, pbIDs).Exec()
	return err
}

View File

@ -1,447 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"testing"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAddBlobToProject verifies that a freshly created blob can be
// associated with a freshly created project.
func TestAddBlobToProject(t *testing.T) {
	blobID, err := AddBlob(&models.Blob{
		Digest: "TestAddBlobToProject_blob1",
		Size:   101,
	})
	require.Nil(t, err)

	projectID, err := AddProject(models.Project{
		Name:    "TestAddBlobToProject_project1",
		OwnerID: 1,
	})
	require.Nil(t, err)

	_, err = AddBlobToProject(blobID, projectID)
	require.Nil(t, err)
}
// TestAddBlobsToProject verifies bulk association of a large number of
// blobs with a single project.
func TestAddBlobsToProject(t *testing.T) {
	pid, err := AddProject(models.Project{
		Name:    "TestAddBlobsToProject_project1",
		OwnerID: 1,
	})
	require.Nil(t, err)
	defer DeleteProject(pid) // best-effort cleanup; error intentionally ignored

	blobsCount := 88888
	blobs := make([]*models.Blob, 0, blobsCount)
	for i := 0; i < blobsCount; i++ {
		blobs = append(blobs, &models.Blob{
			ID:     int64(100000 + i), // Use fake id to speed this test
			Digest: digest.FromString(utils.GenerateRandomString()).String(),
			Size:   100,
		})
	}

	cnt, err := AddBlobsToProject(pid, blobs...)
	require.Nil(t, err)
	// Fix: require.Equal takes (expected, actual); the original call had
	// the arguments swapped, producing misleading failure messages.
	require.Equal(t, int64(blobsCount), cnt)
}
// TestHasBlobInProject verifies that HasBlobInProject reports true for a
// blob that has been linked to the project.
func TestHasBlobInProject(t *testing.T) {
	_, blob, err := GetOrCreateBlob(&models.Blob{
		Digest: digest.FromString(utils.GenerateRandomString()).String(),
		Size:   100,
	})
	require.Nil(t, err)

	_, err = AddBlobToProject(blob.ID, 1)
	require.Nil(t, err)

	found, err := HasBlobInProject(1, blob.Digest)
	require.Nil(t, err)
	assert.True(t, found)
}
// TestCountSizeOfProject verifies that CountSizeOfProject sums the sizes of
// the blobs referenced by the project's artifacts (101+202+303 = 606) while
// skipping the foreign layer.
func TestCountSizeOfProject(t *testing.T) {
	_, err := AddBlob(&models.Blob{
		Digest:      "CountSizeOfProject_blob1",
		ContentType: "application/vnd.docker.distribution.manifest.v2+json",
		Size:        101,
	})
	require.Nil(t, err)
	_, err = AddBlob(&models.Blob{
		Digest:      "CountSizeOfProject_blob2",
		ContentType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
		Size:        202,
	})
	require.Nil(t, err)
	_, err = AddBlob(&models.Blob{
		Digest:      "CountSizeOfProject_blob3",
		ContentType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
		Size:        303,
	})
	require.Nil(t, err)
	// this blob won't be calculated into project size (foreign layer)
	_, err = AddBlob(&models.Blob{
		Digest:      "CountSizeOfProject_blob4",
		ContentType: "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
		Size:        404,
	})
	require.Nil(t, err)

	pid1, err := AddProject(models.Project{
		Name:    "CountSizeOfProject_project1",
		OwnerID: 1,
	})
	require.Nil(t, err)

	// One artifact referencing all four blobs.
	af := &models.Artifact{
		PID:    pid1,
		Repo:   "hello-world",
		Tag:    "v1",
		Digest: "CountSizeOfProject_af1",
		Kind:   "image",
	}
	_, err = AddArtifact(af)
	require.Nil(t, err)

	afnbs := []*models.ArtifactAndBlob{
		{DigestAF: "CountSizeOfProject_af1", DigestBlob: "CountSizeOfProject_blob1"},
		{DigestAF: "CountSizeOfProject_af1", DigestBlob: "CountSizeOfProject_blob2"},
		{DigestAF: "CountSizeOfProject_af1", DigestBlob: "CountSizeOfProject_blob3"},
		{DigestAF: "CountSizeOfProject_af1", DigestBlob: "CountSizeOfProject_blob4"},
	}
	err = AddArtifactNBlobs(afnbs)
	require.Nil(t, err)

	pSize, err := CountSizeOfProject(pid1)
	// Fix: the original test ignored this error entirely.
	require.Nil(t, err)
	// Fix: assert.Equal takes (expected, actual); the original had them swapped.
	assert.Equal(t, int64(606), pSize)
}
// TestRemoveBlobsFromProject verifies that only the requested blobs are
// dissociated from the project while the remaining one stays linked.
func TestRemoveBlobsFromProject(t *testing.T) {
	bb1 := &models.Blob{
		Digest: "TestRemoveBlobsFromProject_blob1",
		Size:   101,
	}
	bb2 := &models.Blob{
		Digest: "TestRemoveBlobsFromProject_blob2",
		Size:   101,
	}
	bb3 := &models.Blob{
		Digest: "TestRemoveBlobsFromProject_blob3",
		Size:   101,
	}
	_, err := AddBlob(bb1)
	require.Nil(t, err)
	_, err = AddBlob(bb2)
	require.Nil(t, err)
	_, err = AddBlob(bb3)
	require.Nil(t, err)

	blobs1 := []*models.Blob{bb1, bb2, bb3}  // all blobs linked to the project
	blobsRm := []*models.Blob{bb1, bb2}      // the ones to remove again

	pid, err := AddProject(models.Project{
		Name:    "TestRemoveBlobsFromProject_project1",
		OwnerID: 1,
	})
	require.Nil(t, err)

	// Fix: the original test ignored the error returned by AddBlobsToProject,
	// so a failed setup would surface as a confusing assertion failure below.
	_, err = AddBlobsToProject(pid, blobs1...)
	require.Nil(t, err)

	err = RemoveBlobsFromProject(pid, blobsRm...)
	require.Nil(t, err)

	// A removed blob must no longer be associated...
	has, err := HasBlobInProject(pid, bb1.Digest)
	require.Nil(t, err)
	assert.False(t, has)
	// ...while the untouched one still is.
	has, err = HasBlobInProject(pid, bb3.Digest)
	require.Nil(t, err)
	assert.True(t, has)
}
// TestCountSizeOfProjectDupdigest verifies that a blob referenced by several
// artifacts of the same project is counted only once
// (101+202+303+404 = 1010, not the double-counted sum).
func TestCountSizeOfProjectDupdigest(t *testing.T) {
	for _, blob := range []*models.Blob{
		{Digest: "CountSizeOfProject_blob11", Size: 101},
		{Digest: "CountSizeOfProject_blob22", Size: 202},
		{Digest: "CountSizeOfProject_blob33", Size: 303},
		{Digest: "CountSizeOfProject_blob44", Size: 404},
	} {
		_, err := AddBlob(blob)
		require.Nil(t, err)
	}

	pid1, err := AddProject(models.Project{
		Name:    "CountSizeOfProject_project11",
		OwnerID: 1,
	})
	require.Nil(t, err)

	// af1 references blobs 11, 22 and 33.
	af1 := &models.Artifact{
		PID:    pid1,
		Repo:   "hello-world",
		Tag:    "v1",
		Digest: "CountSizeOfProject_af11",
		Kind:   "image",
	}
	_, err = AddArtifact(af1)
	require.Nil(t, err)
	err = AddArtifactNBlobs([]*models.ArtifactAndBlob{
		{DigestAF: "CountSizeOfProject_af11", DigestBlob: "CountSizeOfProject_blob11"},
		{DigestAF: "CountSizeOfProject_af11", DigestBlob: "CountSizeOfProject_blob22"},
		{DigestAF: "CountSizeOfProject_af11", DigestBlob: "CountSizeOfProject_blob33"},
	})
	require.Nil(t, err)

	// af2 references blobs 11, 22, 33 again (duplicates) plus 44.
	af2 := &models.Artifact{
		PID:    pid1,
		Repo:   "hello-world",
		Tag:    "v2",
		Digest: "CountSizeOfProject_af22",
		Kind:   "image",
	}
	_, err = AddArtifact(af2)
	require.Nil(t, err)
	err = AddArtifactNBlobs([]*models.ArtifactAndBlob{
		{DigestAF: "CountSizeOfProject_af22", DigestBlob: "CountSizeOfProject_blob11"},
		{DigestAF: "CountSizeOfProject_af22", DigestBlob: "CountSizeOfProject_blob22"},
		{DigestAF: "CountSizeOfProject_af22", DigestBlob: "CountSizeOfProject_blob33"},
		{DigestAF: "CountSizeOfProject_af22", DigestBlob: "CountSizeOfProject_blob44"},
	})
	require.Nil(t, err)

	pSize, err := CountSizeOfProject(pid1)
	// Fix: the original test ignored this error entirely.
	require.Nil(t, err)
	// Fix: assert.Equal takes (expected, actual); the original had them swapped.
	assert.Equal(t, int64(1010), pSize)
}
// TestRemoveUntaggedBlobs verifies that blob associations without any
// referencing artifact are all removed from the project.
func TestRemoveUntaggedBlobs(t *testing.T) {
	projectID, err := AddProject(models.Project{
		Name:    "RemoveUntaggedBlobs_project1",
		OwnerID: 1,
	})
	require.Nil(t, err)

	// Create two blobs and link both to the project; no artifact references
	// them, so both associations count as untagged.
	var linked []*models.Blob
	for i := 0; i < 2; i++ {
		_, b, err := GetOrCreateBlob(&models.Blob{
			Digest: digest.FromString(utils.GenerateRandomString()).String(),
			Size:   100,
		})
		require.Nil(t, err)
		_, err = AddBlobToProject(b.ID, projectID)
		require.Nil(t, err)
		linked = append(linked, b)
	}

	// Sanity check: both blobs are associated before the cleanup.
	for _, b := range linked {
		found, err := HasBlobInProject(projectID, b.Digest)
		require.Nil(t, err)
		assert.True(t, found)
	}

	err = RemoveUntaggedBlobs(projectID)
	require.Nil(t, err)

	// After the cleanup neither association remains.
	for _, b := range linked {
		found, err := HasBlobInProject(projectID, b.Digest)
		require.Nil(t, err)
		assert.False(t, found)
	}
}
// TestRemoveUntaggedBlobsWithNoUntagged verifies that RemoveUntaggedBlobs
// keeps the blob associations that are referenced by an artifact of the
// project and removes only the one association no artifact references.
func TestRemoveUntaggedBlobsWithNoUntagged(t *testing.T) {
	// One artifact in project 333 (fixed fake project ID).
	afDigest := digest.FromString(utils.GenerateRandomString()).String()
	af := &models.Artifact{
		PID:    333,
		Repo:   "hello-world",
		Tag:    "latest",
		Digest: afDigest,
		Kind:   "image",
	}
	_, err := AddArtifact(af)
	require.Nil(t, err)
	// Three blobs that will be referenced by the artifact above ("tagged").
	blob1Digest := digest.FromString(utils.GenerateRandomString()).String()
	blob1 := &models.Blob{
		Digest:      blob1Digest,
		ContentType: "v2.blob",
		Size:        1523,
	}
	_, err = AddBlob(blob1)
	require.Nil(t, err)
	blob2Digest := digest.FromString(utils.GenerateRandomString()).String()
	blob2 := &models.Blob{
		Digest:      blob2Digest,
		ContentType: "v2.blob",
		Size:        1523,
	}
	_, err = AddBlob(blob2)
	require.Nil(t, err)
	blob3Digest := digest.FromString(utils.GenerateRandomString()).String()
	blob3 := &models.Blob{
		Digest:      blob3Digest,
		ContentType: "v2.blob",
		Size:        1523,
	}
	_, err = AddBlob(blob3)
	require.Nil(t, err)
	// Link the three blobs to the artifact via artifact_blob rows.
	afnb1 := &models.ArtifactAndBlob{
		DigestAF:   afDigest,
		DigestBlob: blob1Digest,
	}
	afnb2 := &models.ArtifactAndBlob{
		DigestAF:   afDigest,
		DigestBlob: blob2Digest,
	}
	afnb3 := &models.ArtifactAndBlob{
		DigestAF:   afDigest,
		DigestBlob: blob3Digest,
	}
	var afnbs []*models.ArtifactAndBlob
	afnbs = append(afnbs, afnb1)
	afnbs = append(afnbs, afnb2)
	afnbs = append(afnbs, afnb3)
	err = AddArtifactNBlobs(afnbs)
	require.Nil(t, err)
	// Associate all three tagged blobs with the project.
	_, err = AddBlobToProject(blob1.ID, 333)
	require.Nil(t, err)
	_, err = AddBlobToProject(blob2.ID, 333)
	require.Nil(t, err)
	_, err = AddBlobToProject(blob3.ID, 333)
	require.Nil(t, err)
	// A fourth blob associated with the project but referenced by no
	// artifact — the only association that should be removed.
	blobUntaggedDigest := digest.FromString(utils.GenerateRandomString()).String()
	blobUntagged := &models.Blob{
		Digest:      blobUntaggedDigest,
		ContentType: "v2.blob",
		Size:        1523,
	}
	_, err = AddBlob(blobUntagged)
	require.Nil(t, err)
	_, err = AddBlobToProject(blobUntagged.ID, 333)
	require.Nil(t, err)
	err = RemoveUntaggedBlobs(333)
	require.Nil(t, err)
	// The tagged associations survive...
	has, err := HasBlobInProject(333, blob1.Digest)
	require.Nil(t, err)
	assert.True(t, has)
	has, err = HasBlobInProject(333, blob2.Digest)
	require.Nil(t, err)
	assert.True(t, has)
	has, err = HasBlobInProject(333, blob3.Digest)
	require.Nil(t, err)
	assert.True(t, has)
	// ...while the untagged one is gone.
	has, err = HasBlobInProject(333, blobUntagged.Digest)
	require.Nil(t, err)
	assert.False(t, has)
}

View File

@ -1,32 +0,0 @@
package models
import (
"time"
)
// Artifact holds the details of an artifact — an addressable unit
// (image, chart, etc.) pushed to a repository. It is backed by the
// "artifact_2" table (see TableName).
type Artifact struct {
	ID           int64     `orm:"pk;auto;column(id)" json:"id"`         // surrogate primary key
	PID          int64     `orm:"column(project_id)" json:"project_id"` // owning project ID
	Repo         string    `orm:"column(repo)" json:"repo"`             // repository name
	Tag          string    `orm:"column(tag)" json:"tag"`               // tag within the repository
	Digest       string    `orm:"column(digest)" json:"digest"`         // digest of manifest — per the table DDL comment
	Kind         string    `orm:"column(kind)" json:"kind"`             // kind of artifact: image, chart, etc.
	PushTime     time.Time `orm:"column(push_time)" json:"push_time"`
	PullTime     time.Time `orm:"column(pull_time)" json:"pull_time"`
	CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` // set by ORM on insert
}
// TableName maps the Artifact model to its database table, "artifact_2".
func (af *Artifact) TableName() string {
	const table = "artifact_2"
	return table
}
// ArtifactQuery holds the filter and paging conditions used when querying
// artifacts.
type ArtifactQuery struct {
	PID    int64  // project ID to filter by
	Repo   string // repository name to filter by
	Tag    string // tag to filter by
	Digest string // manifest digest to filter by
	Pagination    // embedded paging parameters (declared elsewhere in this package)
}

View File

@ -38,7 +38,6 @@ func init() {
new(NotificationJob), new(NotificationJob),
new(Blob), new(Blob),
new(ProjectBlob), new(ProjectBlob),
new(Artifact),
new(ArtifactAndBlob), new(ArtifactAndBlob),
new(CVEWhitelist), new(CVEWhitelist),
new(Quota), new(Quota),