Mirror of https://github.com/goharbor/harbor, synced 2025-04-21 17:51:41 +00:00

Commit 108aa21499 (parent 3b53b354d3): upgrade registry to 2.6.0

Makefile (8 changes)
@@ -290,7 +290,7 @@ package_offline: compile build modify_composefile
 	@cp NOTICE $(HARBORPKG)/NOTICE
 	@echo "pulling nginx and registry..."
-	@$(DOCKERPULL) registry:2.5.1
+	@$(DOCKERPULL) registry:2.6.0
 	@$(DOCKERPULL) nginx:1.11.5
 	@if [ "$(NOTARYFLAG)" = "true" ] ; then \
 		echo "pulling notary and mariadb..."; \
@@ -307,7 +307,7 @@ package_offline: compile build modify_composefile
 		$(DOCKERIMAGENAME_LOG):$(VERSIONTAG) \
 		$(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
 		$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) \
-		nginx:1.11.5 registry:2.5.1 photon:1.0 \
+		nginx:1.11.5 registry:2.6.0 photon:1.0 \
 		jiangd/notary:server-0.5.0-fix notary:signer-0.5.0 mariadb:10.1.10; \
 	else \
 		$(DOCKERSAVE) -o $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tgz \
@@ -316,7 +316,7 @@ package_offline: compile build modify_composefile
 		$(DOCKERIMAGENAME_LOG):$(VERSIONTAG) \
 		$(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
 		$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) \
-		nginx:1.11.5 registry:2.5.1 photon:1.0 ; \
+		nginx:1.11.5 registry:2.6.0 photon:1.0 ; \
 	fi
 
 	@if [ "$(NOTARYFLAG)" = "true" ] ; then \
@@ -400,7 +400,7 @@ cleanimage:
 	- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_DB):$(VERSIONTAG)
 	- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG)
 	- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_LOG):$(VERSIONTAG)
-#	- $(DOCKERRMIMAGE) -f registry:2.5.1
+#	- $(DOCKERRMIMAGE) -f registry:2.6.0
 #	- $(DOCKERRMIMAGE) -f nginx:1.11.5
 
 cleandockercomposefile:
@@ -191,14 +191,14 @@ Run the below commands on the host which Harbor is deployed on to preview what f
 
 ```sh
 $ docker-compose stop
-$ docker run -it --name gc --rm --volumes-from registry registry:2.5.1 garbage-collect --dry-run /etc/registry/config.yml
+$ docker run -it --name gc --rm --volumes-from registry registry:2.6.0 garbage-collect --dry-run /etc/registry/config.yml
 ```
 **NOTE:** The above option "--dry-run" will print the progress without removing any data.
 
 Verify the result of the above test, then use the below commands to perform garbage collection and restart Harbor.
 
 ```sh
-$ docker run -it --name gc --rm --volumes-from registry registry:2.5.1 garbage-collect /etc/registry/config.yml
+$ docker run -it --name gc --rm --volumes-from registry registry:2.6.0 garbage-collect /etc/registry/config.yml
 $ docker-compose start
 ```
 
@@ -10,7 +10,7 @@ services:
     ports:
       - 1514:514
   registry:
-    image: library/registry:2.5.1
+    image: library/registry:2.6.0
     restart: always
     volumes:
       - /data/registry:/storage
@@ -11,7 +11,7 @@ services:
     networks:
      - harbor
   registry:
-    image: registry:2.5.1
+    image: registry:2.6.0
     container_name: registry
     restart: always
     volumes:
@@ -22,7 +22,7 @@ import (
 	"strings"
 	"time"
 
-	au "github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/distribution/registry/client/auth/challenge"
 	"github.com/vmware/harbor/src/common/utils"
 	"github.com/vmware/harbor/src/common/utils/registry"
 )
@@ -40,7 +40,7 @@ type Authorizer interface {
 type AuthorizerStore struct {
 	authorizers []Authorizer
 	ping        *url.URL
-	challenges  []au.Challenge
+	challenges  []challenge.Challenge
 }
 
 // NewAuthorizerStore ...
@@ -21,7 +21,7 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/docker/distribution/registry/client/auth"
+	ch "github.com/docker/distribution/registry/client/auth/challenge"
 	"github.com/vmware/harbor/src/common/utils/test"
 )
 
@@ -61,7 +61,7 @@ func (s *simpleAuthorizer) Authorize(req *http.Request,
 
 func TestModify(t *testing.T) {
 	authorizer := &simpleAuthorizer{}
-	challenge := auth.Challenge{
+	challenge := ch.Challenge{
 		Scheme: "bearer",
 	}
 
@@ -72,7 +72,7 @@ func TestModify(t *testing.T) {
 	as := &AuthorizerStore{
 		authorizers: []Authorizer{authorizer},
 		ping:        ping,
-		challenges:  []auth.Challenge{challenge},
+		challenges:  []ch.Challenge{challenge},
 	}
 
 	req, err := http.NewRequest("GET", "http://example.com/v2/ubuntu/manifests/14.04", nil)
@@ -18,12 +18,12 @@ package auth
 import (
 	"net/http"
 
-	au "github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/distribution/registry/client/auth/challenge"
 )
 
 // ParseChallengeFromResponse ...
-func ParseChallengeFromResponse(resp *http.Response) []au.Challenge {
-	challenges := au.ResponseChallenges(resp)
+func ParseChallengeFromResponse(resp *http.Response) []challenge.Challenge {
+	challenges := challenge.ResponseChallenges(resp)
 
 	return challenges
 }
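In distribution 2.6 the challenge types moved out of `registry/client/auth` into the new `registry/client/auth/challenge` package, which is why the Harbor imports above change. A minimal sketch of the relocated API, pinging a registry and printing the challenges it advertises (the endpoint URL is a placeholder, not something from this commit):

```go
package example

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/auth/challenge"
)

// printChallenges hits a registry's /v2/ ping endpoint and lists the auth
// challenges it returns. challenge.ResponseChallenges is the 2.6 home of
// what used to be auth.ResponseChallenges.
func printChallenges() error {
	resp, err := http.Get("http://registry.example.com/v2/")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	for _, c := range challenge.ResponseChallenges(resp) {
		fmt.Printf("scheme=%s params=%v\n", c.Scheme, c.Parameters)
	}
	return nil
}
```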
@@ -45,12 +45,17 @@ func TestUnMarshal(t *testing.T) {
 	}
 
 	refs := manifest.References()
-	if len(refs) != 1 {
-		t.Fatalf("unexpected length of reference: %d != %d", len(refs), 1)
+	if len(refs) != 2 {
+		t.Fatalf("unexpected length of reference: %d != %d", len(refs), 2)
 	}
 
-	digest := "sha256:c04b14da8d1441880ed3fe6106fb2cc6fa1c9661846ac0266b8a5ec8edf37b7c"
+	digest := "sha256:c54a2cc56cbb2f04003c1cd4507e118af7c0d340fe7e2720f70976c4b75237dc"
 	if refs[0].Digest.String() != digest {
 		t.Errorf("unexpected digest: %s != %s", refs[0].Digest.String(), digest)
 	}
+
+	digest = "sha256:c04b14da8d1441880ed3fe6106fb2cc6fa1c9661846ac0266b8a5ec8edf37b7c"
+	if refs[1].Digest.String() != digest {
+		t.Errorf("unexpected digest: %s != %s", refs[1].Digest.String(), digest)
+	}
 }
@@ -323,12 +323,6 @@ func (m *ManifestPuller) enter() (string, error) {
 		blobs = append(blobs, discriptor.Digest.String())
 	}
 
-	// config is also need to be transferred if the schema of manifest is v2
-	manifest2, ok := manifest.(*schema2.DeserializedManifest)
-	if ok {
-		blobs = append(blobs, manifest2.Target().Digest.String())
-	}
-
 	m.logger.Infof("all blobs of %s:%s from %s: %v", name, tag, m.srcURL, blobs)
 
 	for _, blob := range blobs {
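The schema2 special case removed above is no longer needed because, in registry 2.6, a schema2 manifest's `References()` already includes the config descriptor alongside the layers (see the vendored `manifest/schema2/manifest.go` hunk later in this commit). A minimal sketch of the simplified pattern; the package and function names here are illustrative, not part of the commit:

```go
package example

import "github.com/docker/distribution"

// collectBlobs returns the digests of everything a manifest references.
// With distribution 2.6, schema2 References() already yields the config
// descriptor followed by the layers, so no *schema2.DeserializedManifest
// type assertion is required to pick up the config blob.
func collectBlobs(m distribution.Manifest) []string {
	refs := m.References()
	blobs := make([]string, 0, len(refs))
	for _, descriptor := range refs {
		blobs = append(blobs, descriptor.Digest.String())
	}
	return blobs
}
```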
src/vendor/github.com/docker/distribution/AUTHORS (generated, vendored, 35 changes)

@@ -1,6 +1,8 @@
+a-palchikov <deemok@gmail.com>
 Aaron Lehmann <aaron.lehmann@docker.com>
 Aaron Schlesinger <aschlesinger@deis.com>
 Aaron Vinson <avinson.public@gmail.com>
+Adam Duke <adam.v.duke@gmail.com>
 Adam Enger <adamenger@gmail.com>
 Adrian Mouat <adrian.mouat@gmail.com>
 Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
@@ -19,6 +21,7 @@ Anis Elleuch <vadmeste@gmail.com>
 Anton Tiurin <noxiouz@yandex.ru>
 Antonio Mercado <amercado@thinknode.com>
 Antonio Murdaca <runcom@redhat.com>
+Anusha Ragunathan <anusha@docker.com>
 Arien Holthuizen <aholthuizen@schubergphilis.com>
 Arnaud Porterie <arnaud.porterie@docker.com>
 Arthur Baars <arthur@semmle.com>
@@ -26,12 +29,16 @@ Asuka Suzuki <hello@tanksuzuki.com>
 Avi Miller <avi.miller@oracle.com>
 Ayose Cazorla <ayosec@gmail.com>
 BadZen <dave.trombley@gmail.com>
+Ben Bodenmiller <bbodenmiller@hotmail.com>
 Ben Firshman <ben@firshman.co.uk>
 bin liu <liubin0329@gmail.com>
 Brian Bland <brian.bland@docker.com>
 burnettk <burnettk@gmail.com>
 Carson A <ca@carsonoid.net>
+Cezar Sa Espinola <cezarsa@gmail.com>
+Charles Smith <charles.smith@docker.com>
 Chris Dillon <squarism@gmail.com>
+cuiwei13 <cuiwei13@pku.edu.cn>
 cyli <cyli@twistedmatrix.com>
 Daisuke Fujita <dtanshi45@gmail.com>
 Daniel Huhn <daniel@danielhuhn.de>
@@ -48,11 +55,14 @@ Diogo Mónica <diogo.monica@gmail.com>
 DJ Enriquez <dj.enriquez@infospace.com>
 Donald Huang <don.hcd@gmail.com>
 Doug Davis <dug@us.ibm.com>
+Edgar Lee <edgar.lee@docker.com>
 Eric Yang <windfarer@gmail.com>
+Fabio Berchtold <jamesclonk@jamesclonk.ch>
 Fabio Huser <fabio@fh1.ch>
 farmerworking <farmerworking@gmail.com>
 Felix Yan <felixonmars@archlinux.org>
 Florentin Raud <florentin.raud@gmail.com>
+Frank Chen <frankchn@gmail.com>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 gabriell nascimento <gabriell@bluesoft.com.br>
 Gleb Schukin <gschukin@ptsecurity.com>
@@ -64,16 +74,23 @@ HuKeping <hukeping@huawei.com>
 Ian Babrou <ibobrik@gmail.com>
 igayoso <igayoso@gmail.com>
 Jack Griffin <jackpg14@gmail.com>
+James Findley <jfindley@fastmail.com>
 Jason Freidman <jason.freidman@gmail.com>
+Jason Heiss <jheiss@aput.net>
 Jeff Nickoloff <jeff@allingeek.com>
+Jess Frazelle <acidburn@google.com>
 Jessie Frazelle <jessie@docker.com>
 jhaohai <jhaohai@foxmail.com>
 Jianqing Wang <tsing@jianqing.org>
+Jihoon Chung <jihoon@gmail.com>
+Joao Fernandes <joao.fernandes@docker.com>
+John Mulhausen <john@docker.com>
 John Starks <jostarks@microsoft.com>
 Jon Johnson <jonjohnson@google.com>
 Jon Poler <jonathan.poler@apcera.com>
 Jonathan Boulle <jonathanboulle@gmail.com>
 Jordan Liggitt <jliggitt@redhat.com>
+Josh Chorlton <josh.chorlton@docker.com>
 Josh Hawn <josh.hawn@docker.com>
 Julien Fernandez <julien.fernandez@gmail.com>
 Ke Xu <leonhartx.k@gmail.com>
@@ -84,22 +101,30 @@ Kenny Leung <kleung@google.com>
 Li Yi <denverdino@gmail.com>
 Liu Hua <sdu.liu@huawei.com>
 liuchang0812 <liuchang0812@gmail.com>
+Lloyd Ramey <lnr0626@gmail.com>
 Louis Kottmann <louis.kottmann@gmail.com>
 Luke Carpenter <x@rubynerd.net>
+Marcus Martins <marcus@docker.com>
 Mary Anthony <mary@docker.com>
 Matt Bentley <mbentley@mbentley.net>
 Matt Duch <matt@learnmetrics.com>
 Matt Moore <mattmoor@google.com>
 Matt Robenolt <matt@ydekproductions.com>
+Matthew Green <greenmr@live.co.uk>
 Michael Prokop <mika@grml.org>
 Michal Minar <miminar@redhat.com>
+Michal Minář <miminar@redhat.com>
+Mike Brown <brownwm@us.ibm.com>
 Miquel Sabaté <msabate@suse.com>
+Misty Stanley-Jones <misty@apache.org>
+Misty Stanley-Jones <misty@docker.com>
 Morgan Bauer <mbauer@us.ibm.com>
 moxiegirl <mary@docker.com>
 Nathan Sullivan <nathan@nightsys.net>
 nevermosby <robolwq@qq.com>
 Nghia Tran <tcnghia@gmail.com>
 Nikita Tarasov <nikita@mygento.ru>
+Noah Treuhaft <noah.treuhaft@docker.com>
 Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
 Oilbeater <liumengxinfly@gmail.com>
 Olivier Gambier <olivier@docker.com>
@@ -108,17 +133,23 @@ Omer Cohen <git@omer.io>
 Patrick Devine <patrick.devine@docker.com>
 Phil Estes <estesp@linux.vnet.ibm.com>
 Philip Misiowiec <philip@atlashealth.com>
+Pierre-Yves Ritschard <pyr@spootnik.org>
+Qiao Anran <qiaoanran@gmail.com>
+Randy Barlow <randy@electronsweatshop.com>
 Richard Scothern <richard.scothern@docker.com>
 Rodolfo Carvalho <rhcarvalho@gmail.com>
 Rusty Conover <rusty@luckydinosaur.com>
 Sean Boran <Boran@users.noreply.github.com>
 Sebastiaan van Stijn <github@gone.nl>
+Sebastien Coavoux <s.coavoux@free.fr>
 Serge Dubrouski <sergeyfd@gmail.com>
 Sharif Nassar <sharif@mrwacky.com>
 Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
 Shreyas Karnik <karnik.shreyas@gmail.com>
 Simon Thulbourn <simon+github@thulbourn.com>
+spacexnice <yaoyao.xyy@alibaba-inc.com>
 Spencer Rinehart <anubis@overthemonkey.com>
+Stan Hu <stanhu@gmail.com>
 Stefan Majewsky <stefan.majewsky@sap.com>
 Stefan Weil <sw@weilnetz.de>
 Stephen J Day <stephen.day@docker.com>
@@ -134,6 +165,8 @@ Tonis Tiigi <tonistiigi@gmail.com>
 Tony Holdstock-Brown <tony@docker.com>
 Trevor Pounds <trevor.pounds@gmail.com>
 Troels Thomsen <troels@thomsen.io>
+Victor Vieux <vieux@docker.com>
+Victoria Bialas <victoria.bialas@docker.com>
 Vincent Batts <vbatts@redhat.com>
 Vincent Demeester <vincent@sbr.pm>
 Vincent Giersch <vincent.giersch@ovh.net>
@@ -142,6 +175,8 @@ weiyuan.yl <weiyuan.yl@alibaba-inc.com>
 xg.song <xg.song@venusource.com>
 xiekeyang <xiekeyang@huawei.com>
 Yann ROBERT <yann.robert@anantaplex.fr>
+yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
+yuexiao-wang <wang.yuexiao@zte.com.cn>
 yuzou <zouyu7@huawei.com>
 zhouhaibing089 <zhouhaibing089@gmail.com>
 姜继忠 <jizhong.jiangjz@alibaba-inc.com>
src/vendor/github.com/docker/distribution/BUILDING.md (generated, vendored, 2 changes)

@@ -11,7 +11,7 @@ Most people should use the [official Registry docker image](https://hub.docker.c
 
 People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
 
-OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md).
+OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
 
 ### Gotchas
 
src/vendor/github.com/docker/distribution/CHANGELOG.md
generated
vendored
83
src/vendor/github.com/docker/distribution/CHANGELOG.md
generated
vendored
@ -1,9 +1,82 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## 2.6.0 (2017-01-18)
|
||||||
|
|
||||||
|
#### Storage
|
||||||
|
- S3: fixed bug in delete due to read-after-write inconsistency
|
||||||
|
- S3: allow EC2 IAM roles to be used when authorizing region endpoints
|
||||||
|
- S3: add Object ACL Support
|
||||||
|
- S3: fix delete method's notion of subpaths
|
||||||
|
- S3: use multipart upload API in `Move` method for performance
|
||||||
|
- S3: add v2 signature signing for legacy S3 clones
|
||||||
|
- Swift: add simple heuristic to detect incomplete DLOs during read ops
|
||||||
|
- Swift: support different user and tenant domains
|
||||||
|
- Swift: bulk deletes in chunks
|
||||||
|
- Aliyun OSS: fix delete method's notion of subpaths
|
||||||
|
- Aliyun OSS: optimize data copy after upload finishes
|
||||||
|
- Azure: close leaking response body
|
||||||
|
- Fix storage drivers dropping non-EOF errors when listing repositories
|
||||||
|
- Compare path properly when listing repositories in catalog
|
||||||
|
- Add a foreign layer URL host whitelist
|
||||||
|
- Improve catalog enumerate runtime
|
||||||
|
|
||||||
|
#### Registry
|
||||||
|
- Export `storage.CreateOptions` in top-level package
|
||||||
|
- Enable notifications to endpoints that use self-signed certificates
|
||||||
|
- Properly validate multi-URL foreign layers
|
||||||
|
- Add control over validation of URLs in pushed manifests
|
||||||
|
- Proxy mode: fix socket leak when pull is cancelled
|
||||||
|
- Tag service: properly handle error responses on HEAD request
|
||||||
|
- Support for custom authentication URL in proxying registry
|
||||||
|
- Add configuration option to disable access logging
|
||||||
|
- Add notification filtering by target media type
|
||||||
|
- Manifest: `References()` returns all children
|
||||||
|
- Honor `X-Forwarded-Port` and Forwarded headers
|
||||||
|
- Reference: Preserve tag and digest in With* functions
|
||||||
|
- Add policy configuration for enforcing repository classes
|
||||||
|
|
||||||
|
#### Client
|
||||||
|
- Changes the client Tags `All()` method to follow links
|
||||||
|
- Allow registry clients to connect via HTTP2
|
||||||
|
- Better handling of OAuth errors in client
|
||||||
|
|
||||||
|
#### Spec
|
||||||
|
- Manifest: clarify relationship between urls and foreign layers
|
||||||
|
- Authorization: add support for repository classes
|
||||||
|
|
||||||
|
#### Manifest
|
||||||
|
- Override media type returned from `Stat()` for existing manifests
|
||||||
|
- Add plugin mediatype to distribution manifest
|
||||||
|
|
||||||
|
#### Docs
|
||||||
|
- Document `TOOMANYREQUESTS` error code
|
||||||
|
- Document required Let's Encrypt port
|
||||||
|
- Improve documentation around implementation of OAuth2
|
||||||
|
- Improve documentation for configuration
|
||||||
|
|
||||||
|
#### Auth
|
||||||
|
- Add support for registry type in scope
|
||||||
|
- Add support for using v2 ping challenges for v1
|
||||||
|
- Add leeway to JWT `nbf` and `exp` checking
|
||||||
|
- htpasswd: dynamically parse htpasswd file
|
||||||
|
- Fix missing auth headers with PATCH HTTP request when pushing to default port
|
||||||
|
|
||||||
|
#### Dockerfile
|
||||||
|
- Update to go1.7
|
||||||
|
- Reorder Dockerfile steps for better layer caching
|
||||||
|
|
||||||
|
#### Notes
|
||||||
|
|
||||||
|
Documentation has moved to the documentation repository at
|
||||||
|
`github.com/docker/docker.github.io/tree/master/registry`
|
||||||
|
|
||||||
|
The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing.
|
||||||
|
|
||||||
|
|
||||||
## 2.5.0 (2016-06-14)
|
## 2.5.0 (2016-06-14)
|
||||||
|
|
||||||
### Storage
|
#### Storage
|
||||||
- Ensure uploads directory is cleaned after upload is commited
|
- Ensure uploads directory is cleaned after upload is committed
|
||||||
- Add ability to cap concurrent operations in filesystem driver
|
- Add ability to cap concurrent operations in filesystem driver
|
||||||
- S3: Add 'us-gov-west-1' to the valid region list
|
- S3: Add 'us-gov-west-1' to the valid region list
|
||||||
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
|
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
|
||||||
@ -23,13 +96,13 @@
|
|||||||
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
|
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
|
||||||
- Clarify API documentation around catalog fetch behavior
|
- Clarify API documentation around catalog fetch behavior
|
||||||
|
|
||||||
### API
|
#### API
|
||||||
- Support returning HTTP 429 (Too Many Requests)
|
- Support returning HTTP 429 (Too Many Requests)
|
||||||
|
|
||||||
### Documentation
|
#### Documentation
|
||||||
- Update auth documentation examples to show "expires in" as int
|
- Update auth documentation examples to show "expires in" as int
|
||||||
|
|
||||||
### Docker Image
|
#### Docker Image
|
||||||
- Use Alpine Linux as base image
|
- Use Alpine Linux as base image
|
||||||
|
|
||||||
|
|
||||||
|
src/vendor/github.com/docker/distribution/Dockerfile (generated, vendored, 8 changes)

@@ -1,15 +1,15 @@
-FROM golang:1.6-alpine
+FROM golang:1.7-alpine
 
 ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
 ENV DOCKER_BUILDTAGS include_oss include_gcs
 
+RUN set -ex \
+    && apk add --no-cache make git
+
 WORKDIR $DISTRIBUTION_DIR
 COPY . $DISTRIBUTION_DIR
 COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
 
-RUN set -ex \
-    && apk add --no-cache make git
-
 RUN make PREFIX=/go clean binaries
 
 VOLUME ["/var/lib/registry"]
src/vendor/github.com/docker/distribution/Makefile
generated
vendored
17
src/vendor/github.com/docker/distribution/Makefile
generated
vendored
@ -13,7 +13,7 @@ endif
|
|||||||
|
|
||||||
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
|
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
|
||||||
|
|
||||||
.PHONY: clean all fmt vet lint build test binaries
|
.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet
|
||||||
.DEFAULT: all
|
.DEFAULT: all
|
||||||
all: fmt vet lint build test binaries
|
all: fmt vet lint build test binaries
|
||||||
|
|
||||||
@ -27,22 +27,25 @@ version/version.go:
|
|||||||
# Required for go 1.5 to build
|
# Required for go 1.5 to build
|
||||||
GO15VENDOREXPERIMENT := 1
|
GO15VENDOREXPERIMENT := 1
|
||||||
|
|
||||||
|
# Go files
|
||||||
|
GOFILES=$(shell find . -type f -name '*.go')
|
||||||
|
|
||||||
# Package list
|
# Package list
|
||||||
PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)
|
PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)
|
||||||
|
|
||||||
# Resolving binary dependencies for specific targets
|
# Resolving binary dependencies for specific targets
|
||||||
GOLINT := $(shell which golint || echo '')
|
GOLINT=$(shell which golint || echo '')
|
||||||
GODEP := $(shell which godep || echo '')
|
GODEP=$(shell which godep || echo '')
|
||||||
|
|
||||||
${PREFIX}/bin/registry: $(wildcard **/*.go)
|
${PREFIX}/bin/registry: $(GOFILES)
|
||||||
@echo "+ $@"
|
@echo "+ $@"
|
||||||
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
|
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
|
||||||
|
|
||||||
${PREFIX}/bin/digest: $(wildcard **/*.go)
|
${PREFIX}/bin/digest: $(GOFILES)
|
||||||
@echo "+ $@"
|
@echo "+ $@"
|
||||||
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest
|
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest
|
||||||
|
|
||||||
${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go)
|
${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES)
|
||||||
@echo "+ $@"
|
@echo "+ $@"
|
||||||
@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
|
@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
|
||||||
|
|
||||||
|
src/vendor/github.com/docker/distribution/README.md (generated, vendored, 6 changes)

@@ -19,7 +19,7 @@ This repository contains the following components:
 | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
 | **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
 | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
-| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. |
+| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
 
 ### How does this integrate with Docker engine?
 
@@ -68,7 +68,7 @@ others, it is not.
 For example, users with their own software products may want to maintain a
 registry for private, company images. Also, you may wish to deploy your own
 image repository for images used to test or in continuous integration. For these
-use cases and others, [deploying your own registry instance](docs/deploying.md)
+use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
 may be the better choice.
 
 ### Migration to Registry 2.0
@@ -83,7 +83,7 @@ created. For more information see [docker/migrator]
 
 Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
 issues, fixes, and patches to this project. If you are contributing code, see
-the instructions for [building a development environment](docs/recipes/building.md).
+the instructions for [building a development environment](BUILDING.md).
 
 ## Support
 
src/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md (generated, vendored, new file, 36 lines)

@@ -0,0 +1,36 @@
+## Registry Release Checklist
+
+10. Compile release notes detailing features and since the last release. Update the `CHANGELOG.md` file.
+
+20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go`
+
+30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files.
+
+```
+make AUTHORS
+```
+
+40. Create a signed tag.
+
+Distribution uses semantic versioning. Tags are of the format `vx.y.z[-rcn]`
+You will need PGP installed and a PGP key which has been added to your Github account. The comment for the tag should include the release notes.
+
+50. Push the signed tag
+
+60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox.
+
+70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request.
+
+80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary.
+e.g. to release `2.3.1`
+
+`2.3.1 (new)`
+
+`2.3.0 -> 2.3.0` can be removed
+
+`2 -> 2.3.1`
+
+`2.3 -> 2.3.1`
+
+90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images.
src/vendor/github.com/docker/distribution/blobs.go (generated, vendored, 12 changes)

@@ -192,6 +192,18 @@ type BlobCreateOption interface {
 	Apply(interface{}) error
 }
 
+// CreateOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type CreateOptions struct {
+	Mount struct {
+		ShouldMount bool
+		From        reference.Canonical
+		// Stat allows to pass precalculated descriptor to link and return.
+		// Blob access check will be skipped if set.
+		Stat *Descriptor
+	}
+}
+
 // BlobWriter provides a handle for inserting data into a blob store.
 // Instances should be obtained from BlobWriteService.Writer and
 // BlobWriteService.Resume. If supported by the store, a writer can be
src/vendor/github.com/docker/distribution/circle.yml
generated
vendored
14
src/vendor/github.com/docker/distribution/circle.yml
generated
vendored
@ -8,7 +8,7 @@ machine:
|
|||||||
|
|
||||||
post:
|
post:
|
||||||
# go
|
# go
|
||||||
- gvm install go1.6 --prefer-binary --name=stable
|
- gvm install go1.7 --prefer-binary --name=stable
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
# Convenient shortcuts to "common" locations
|
# Convenient shortcuts to "common" locations
|
||||||
@ -49,9 +49,10 @@ test:
|
|||||||
# - gvm use old && go version
|
# - gvm use old && go version
|
||||||
- gvm use stable && go version
|
- gvm use stable && go version
|
||||||
|
|
||||||
|
# todo(richard): replace with a more robust vendoring solution. Removed due to a fundamental disagreement in godep philosophies.
|
||||||
# Ensure validation of dependencies
|
# Ensure validation of dependencies
|
||||||
- gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi:
|
# - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi:
|
||||||
pwd: $BASE_STABLE
|
# pwd: $BASE_STABLE
|
||||||
|
|
||||||
# First thing: build everything. This will catch compile errors, and it's
|
# First thing: build everything. This will catch compile errors, and it's
|
||||||
# also necessary for go vet to work properly (see #807).
|
# also necessary for go vet to work properly (see #807).
|
||||||
@ -73,16 +74,19 @@ test:
|
|||||||
override:
|
override:
|
||||||
# Test stable, and report
|
# Test stable, and report
|
||||||
- gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
|
- gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
|
||||||
timeout: 600
|
timeout: 1000
|
||||||
pwd: $BASE_STABLE
|
pwd: $BASE_STABLE
|
||||||
|
|
||||||
|
# Test stable with race
|
||||||
|
- gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE':
|
||||||
|
timeout: 1000
|
||||||
|
pwd: $BASE_STABLE
|
||||||
post:
|
post:
|
||||||
# Report to codecov
|
# Report to codecov
|
||||||
- bash <(curl -s https://codecov.io/bash):
|
- bash <(curl -s https://codecov.io/bash):
|
||||||
pwd: $BASE_STABLE
|
pwd: $BASE_STABLE
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
# Disabled the -race detector due to massive memory usage.
|
|
||||||
# Do we want these as well?
|
# Do we want these as well?
|
||||||
# - go get code.google.com/p/go.tools/cmd/goimports
|
# - go get code.google.com/p/go.tools/cmd/goimports
|
||||||
# - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
|
# - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
|
||||||
|
16
src/vendor/github.com/docker/distribution/context/http.go
generated
vendored
16
src/vendor/github.com/docker/distribution/context/http.go
generated
vendored
@ -103,20 +103,22 @@ func GetRequestID(ctx Context) string {
|
|||||||
// WithResponseWriter returns a new context and response writer that makes
|
// WithResponseWriter returns a new context and response writer that makes
|
||||||
// interesting response statistics available within the context.
|
// interesting response statistics available within the context.
|
||||||
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
|
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
|
||||||
irw := instrumentedResponseWriter{
|
|
||||||
ResponseWriter: w,
|
|
||||||
Context: ctx,
|
|
||||||
}
|
|
||||||
|
|
||||||
if closeNotifier, ok := w.(http.CloseNotifier); ok {
|
if closeNotifier, ok := w.(http.CloseNotifier); ok {
|
||||||
irwCN := &instrumentedResponseWriterCN{
|
irwCN := &instrumentedResponseWriterCN{
|
||||||
instrumentedResponseWriter: irw,
|
instrumentedResponseWriter: instrumentedResponseWriter{
|
||||||
CloseNotifier: closeNotifier,
|
ResponseWriter: w,
|
||||||
|
Context: ctx,
|
||||||
|
},
|
||||||
|
CloseNotifier: closeNotifier,
|
||||||
}
|
}
|
||||||
|
|
||||||
return irwCN, irwCN
|
return irwCN, irwCN
|
||||||
}
|
}
|
||||||
|
|
||||||
|
irw := instrumentedResponseWriter{
|
||||||
|
ResponseWriter: w,
|
||||||
|
Context: ctx,
|
||||||
|
}
|
||||||
return &irw, &irw
|
return &irw, &irw
|
||||||
}
|
}
|
||||||
|
|
||||||
|
src/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go (generated, vendored, 7 changes)

@@ -9,11 +9,10 @@ import (
 
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/reference"
-	"github.com/docker/libtrust"
 
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/libtrust"
 )
 
 type diffID digest.Digest
@@ -95,7 +94,7 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani
 	}
 
 	if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
-		return nil, errors.New("number of descriptors and number of layers in rootfs must match")
+		return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors)
 	}
 
 	// Generate IDs for each layer
src/vendor/github.com/docker/distribution/manifest/schema2/builder.go (generated, vendored, 3 changes)

@@ -46,6 +46,9 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
 	m.Config, err = mb.bs.Stat(ctx, configDigest)
 	switch err {
 	case nil:
+		// Override MediaType, since Put always replaces the specified media
+		// type with application/octet-stream in the descriptor it returns.
+		m.Config.MediaType = MediaTypeConfig
 		return FromStruct(m)
 	case distribution.ErrBlobUnknown:
 		// nop
src/vendor/github.com/docker/distribution/manifest/schema2/manifest.go
generated
vendored
8
src/vendor/github.com/docker/distribution/manifest/schema2/manifest.go
generated
vendored
@ -17,6 +17,9 @@ const (
|
|||||||
// MediaTypeConfig specifies the mediaType for the image configuration.
|
// MediaTypeConfig specifies the mediaType for the image configuration.
|
||||||
MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
|
MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
|
||||||
|
|
||||||
|
// MediaTypePluginConfig specifies the mediaType for plugin configuration.
|
||||||
|
MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
|
||||||
|
|
||||||
// MediaTypeLayer is the mediaType used for layers referenced by the
|
// MediaTypeLayer is the mediaType used for layers referenced by the
|
||||||
// manifest.
|
// manifest.
|
||||||
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
|
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
|
||||||
@ -66,7 +69,10 @@ type Manifest struct {
|
|||||||
|
|
||||||
// References returnes the descriptors of this manifests references.
|
// References returnes the descriptors of this manifests references.
|
||||||
func (m Manifest) References() []distribution.Descriptor {
|
func (m Manifest) References() []distribution.Descriptor {
|
||||||
return m.Layers
|
references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
|
||||||
|
references = append(references, m.Config)
|
||||||
|
references = append(references, m.Layers...)
|
||||||
|
return references
|
||||||
}
|
}
|
||||||
|
|
||||||
// Target returns the target of this signed manifest.
|
// Target returns the target of this signed manifest.
|
||||||
|
6
src/vendor/github.com/docker/distribution/manifest/versioned.go
generated
vendored
6
src/vendor/github.com/docker/distribution/manifest/versioned.go
generated
vendored
@ -1,8 +1,8 @@
|
|||||||
package manifest
|
package manifest
|
||||||
|
|
||||||
// Versioned provides a struct with the manifest schemaVersion and . Incoming
|
// Versioned provides a struct with the manifest schemaVersion and mediaType.
|
||||||
// content with unknown schema version can be decoded against this struct to
|
// Incoming content with unknown schema version can be decoded against this
|
||||||
// check the version.
|
// struct to check the version.
|
||||||
type Versioned struct {
|
type Versioned struct {
|
||||||
// SchemaVersion is the image manifest schema that this image follows
|
// SchemaVersion is the image manifest schema that this image follows
|
||||||
SchemaVersion int `json:"schemaVersion"`
|
SchemaVersion int `json:"schemaVersion"`
|
||||||
|
12
src/vendor/github.com/docker/distribution/manifests.go
generated
vendored
12
src/vendor/github.com/docker/distribution/manifests.go
generated
vendored
@ -12,8 +12,13 @@ import (
|
|||||||
// references and an optional target
|
// references and an optional target
|
||||||
type Manifest interface {
|
type Manifest interface {
|
||||||
// References returns a list of objects which make up this manifest.
|
// References returns a list of objects which make up this manifest.
|
||||||
// The references are strictly ordered from base to head. A reference
|
// A reference is anything which can be represented by a
|
||||||
// is anything which can be represented by a distribution.Descriptor
|
// distribution.Descriptor. These can consist of layers, resources or other
|
||||||
|
// manifests.
|
||||||
|
//
|
||||||
|
// While no particular order is required, implementations should return
|
||||||
|
// them from highest to lowest priority. For example, one might want to
|
||||||
|
// return the base layer before the top layer.
|
||||||
References() []Descriptor
|
References() []Descriptor
|
||||||
|
|
||||||
// Payload provides the serialized format of the manifest, in addition to
|
// Payload provides the serialized format of the manifest, in addition to
|
||||||
@ -36,6 +41,9 @@ type ManifestBuilder interface {
|
|||||||
// AppendReference includes the given object in the manifest after any
|
// AppendReference includes the given object in the manifest after any
|
||||||
// existing dependencies. If the add fails, such as when adding an
|
// existing dependencies. If the add fails, such as when adding an
|
||||||
// unsupported dependency, an error may be returned.
|
// unsupported dependency, an error may be returned.
|
||||||
|
//
|
||||||
|
// The destination of the reference is dependent on the manifest type and
|
||||||
|
// the dependency type.
|
||||||
AppendReference(dependency Describable) error
|
AppendReference(dependency Describable) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
src/vendor/github.com/docker/distribution/reference/reference.go (generated, vendored, 40 changes)

@@ -24,6 +24,8 @@ package reference
 import (
 	"errors"
 	"fmt"
+	"path"
+	"strings"
 
 	"github.com/docker/distribution/digest"
 )
@@ -43,6 +45,9 @@ var (
 	// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
 	ErrDigestInvalidFormat = errors.New("invalid digest format")
 
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
 	// ErrNameEmpty is returned for empty, invalid repository names.
 	ErrNameEmpty = errors.New("repository name must have at least one component")
 
@@ -134,7 +139,7 @@ type Canonical interface {
 func SplitHostname(named Named) (string, string) {
 	name := named.Name()
 	match := anchoredNameRegexp.FindStringSubmatch(name)
-	if match == nil || len(match) != 3 {
+	if len(match) != 3 {
 		return "", name
 	}
 	return match[1], match[2]
@@ -149,7 +154,9 @@ func Parse(s string) (Reference, error) {
 	if s == "" {
 		return nil, ErrNameEmpty
 	}
-	// TODO(dmcgowan): Provide more specific and helpful error
+	if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+		return nil, ErrNameContainsUppercase
+	}
 	return nil, ErrReferenceInvalidFormat
 }
 
@@ -212,6 +219,13 @@ func WithTag(name Named, tag string) (NamedTagged, error) {
 	if !anchoredTagRegexp.MatchString(tag) {
 		return nil, ErrTagInvalidFormat
 	}
+	if canonical, ok := name.(Canonical); ok {
+		return reference{
+			name:   name.Name(),
+			tag:    tag,
+			digest: canonical.Digest(),
+		}, nil
+	}
 	return taggedReference{
 		name: name.Name(),
 		tag:  tag,
@@ -224,12 +238,34 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
 	if !anchoredDigestRegexp.MatchString(digest.String()) {
 		return nil, ErrDigestInvalidFormat
 	}
+	if tagged, ok := name.(Tagged); ok {
+		return reference{
+			name:   name.Name(),
+			tag:    tagged.Tag(),
+			digest: digest,
+		}, nil
+	}
 	return canonicalReference{
 		name:   name.Name(),
 		digest: digest,
 	}, nil
 }
 
+// Match reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func Match(pattern string, ref Reference) (bool, error) {
+	matched, err := path.Match(pattern, ref.String())
+	if namedRef, isNamed := ref.(Named); isNamed && !matched {
+		matched, _ = path.Match(pattern, namedRef.Name())
+	}
+	return matched, err
+}
+
+// TrimNamed removes any tag or digest from the named reference.
+func TrimNamed(ref Named) Named {
+	return repository(ref.Name())
+}
+
 func getBestReferenceType(ref reference) Reference {
 	if ref.name == "" {
 		// Allow digest only references
src/vendor/github.com/docker/distribution/registry/api/errcode/register.go
generated
vendored
2
src/vendor/github.com/docker/distribution/registry/api/errcode/register.go
generated
vendored
@ -55,7 +55,7 @@ var (
|
|||||||
HTTPStatusCode: http.StatusForbidden,
|
HTTPStatusCode: http.StatusForbidden,
|
||||||
})
|
})
|
||||||
|
|
||||||
// ErrorCodeUnavailable provides a common error to report unavialability
|
// ErrorCodeUnavailable provides a common error to report unavailability
|
||||||
// of a service or endpoint.
|
// of a service or endpoint.
|
||||||
ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
|
ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
|
||||||
Value: "UNAVAILABLE",
|
Value: "UNAVAILABLE",
|
||||||
|
49  src/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go  (generated, vendored)
@@ -175,6 +175,27 @@ var (
 			errcode.ErrorCodeDenied,
 		},
 	}
+
+	tooManyRequestsDescriptor = ResponseDescriptor{
+		Name:        "Too Many Requests",
+		StatusCode:  http.StatusTooManyRequests,
+		Description: "The client made too many requests within a time interval.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeTooManyRequests,
+		},
+	}
 )
 
 const (
@@ -202,17 +223,6 @@ const (
 		...
 	]
 }`
-
-	unauthorizedErrorsBody = `{
-	"errors:" [
-	    {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}`
 )
 
 // APIDescriptor exports descriptions of the layout of the v2 registry API.
@@ -391,6 +401,7 @@ var routeDescriptors = []RouteDescriptor{
 						StatusCode: http.StatusNotFound,
 					},
 					unauthorizedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
@@ -445,6 +456,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 			{
@@ -481,6 +493,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
@@ -535,6 +548,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
@@ -592,6 +606,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 					{
 						Name:        "Missing Layer(s)",
 						Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.",
@@ -661,6 +676,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 					{
 						Name:        "Unknown Manifest",
 						Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.",
@@ -769,6 +785,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 			{
@@ -843,6 +860,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
@@ -909,6 +927,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
@@ -993,6 +1012,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 			{
@@ -1039,6 +1059,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 			{
@@ -1103,6 +1124,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
@@ -1175,6 +1197,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
@@ -1249,6 +1272,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 			{
@@ -1334,6 +1358,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
@@ -1424,6 +1449,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
@@ -1480,6 +1506,7 @@ var routeDescriptors = []RouteDescriptor{
 					unauthorizedResponseDescriptor,
 					repositoryNotFoundResponseDescriptor,
 					deniedResponseDescriptor,
+					tooManyRequestsDescriptor,
 				},
 			},
 		},
161  src/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go  (generated, vendored, new file)
@@ -0,0 +1,161 @@
+package v2
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode"
+)
+
+var (
+	// according to rfc7230
+	reToken            = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
+	reQuotedValue      = regexp.MustCompile(`^[^\\"]+`)
+	reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
+)
+
+// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains
+// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The
+// function parses only the first element of the list, which is set by the very first proxy. It returns a map
+// of corresponding key-value pairs and an unparsed slice of the input string.
+//
+// Examples of Forwarded header values:
+//
+//  1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
+//  2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
+//
+// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
+// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
+func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
+	// Following are states of forwarded header parser. Any state could transition to a failure.
+	const (
+		// terminating state; can transition to Parameter
+		stateElement = iota
+		// terminating state; can transition to KeyValueDelimiter
+		stateParameter
+		// can transition to Value
+		stateKeyValueDelimiter
+		// can transition to one of { QuotedValue, PairEnd }
+		stateValue
+		// can transition to one of { EscapedCharacter, PairEnd }
+		stateQuotedValue
+		// can transition to one of { QuotedValue }
+		stateEscapedCharacter
+		// terminating state; can transition to one of { Parameter, Element }
+		statePairEnd
+	)
+
+	var (
+		parameter string
+		value     string
+		parse     = forwarded[:]
+		res       = map[string]string{}
+		state     = stateElement
+	)
+
+Loop:
+	for {
+		// skip spaces unless in quoted value
+		if state != stateQuotedValue && state != stateEscapedCharacter {
+			parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
+		}
+
+		if len(parse) == 0 {
+			if state != stateElement && state != statePairEnd && state != stateParameter {
+				return nil, parse, fmt.Errorf("unexpected end of input")
+			}
+			// terminating
+			break
+		}
+
+		switch state {
+		// terminate at list element delimiter
+		case stateElement:
+			if parse[0] == ',' {
+				parse = parse[1:]
+				break Loop
+			}
+			state = stateParameter
+
+		// parse parameter (the key of key-value pair)
+		case stateParameter:
+			match := reToken.FindString(parse)
+			if len(match) == 0 {
+				return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
+			}
+			parameter = strings.ToLower(match)
+			parse = parse[len(match):]
+			state = stateKeyValueDelimiter
+
+		// parse '='
+		case stateKeyValueDelimiter:
+			if parse[0] != '=' {
+				return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
+			}
+			parse = parse[1:]
+			state = stateValue
+
+		// parse value or quoted value
+		case stateValue:
+			if parse[0] == '"' {
+				parse = parse[1:]
+				state = stateQuotedValue
+			} else {
+				value = reToken.FindString(parse)
+				if len(value) == 0 {
+					return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
+				}
+				if _, exists := res[parameter]; exists {
+					return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
+				}
+				res[parameter] = value
+				parse = parse[len(value):]
+				value = ""
+				state = statePairEnd
+			}
+
+		// parse a part of quoted value until the first backslash
+		case stateQuotedValue:
+			match := reQuotedValue.FindString(parse)
+			value += match
+			parse = parse[len(match):]
+			switch {
+			case len(parse) == 0:
+				return nil, parse, fmt.Errorf("unterminated quoted string")
+			case parse[0] == '"':
+				res[parameter] = value
+				value = ""
+				parse = parse[1:]
+				state = statePairEnd
+			case parse[0] == '\\':
+				parse = parse[1:]
+				state = stateEscapedCharacter
+			}
+
+		// parse escaped character in a quoted string, ignore the backslash
+		// transition back to QuotedValue state
+		case stateEscapedCharacter:
+			c := reEscapedCharacter.FindString(parse)
+			if len(c) == 0 {
+				return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
+			}
+			value += c
+			parse = parse[1:]
+			state = stateQuotedValue
+
+		// expect either a new key-value pair, new list or end of input
+		case statePairEnd:
+			switch parse[0] {
+			case ';':
+				parse = parse[1:]
+				state = stateParameter
+			case ',':
+				state = stateElement
+			default:
+				return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
+			}
+		}
+	}
+
+	return res, parse, nil
+}
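For context, the parser above only consumes the first element of a Forwarded list and lower-cases parameter names. A minimal sketch of how it behaves on the first example value quoted in its doc comment; since the function is unexported, this would have to live inside package v2 (for example as a test file), and it is illustrative only, not part of the vendored code:

```go
package v2

import "testing"

// Illustrative sketch, not part of the vendored code: exercises
// parseForwardedHeader on the first example value from its doc comment.
func TestParseForwardedHeaderSketch(t *testing.T) {
	header := `For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown`

	params, rest, err := parseForwardedHeader(header)
	if err != nil {
		t.Fatal(err)
	}
	// Only the first list element is parsed; keys are lower-cased.
	if params["for"] != "192.0.2.43" || params["proto"] != "https" {
		t.Fatalf("unexpected result: %#v", params)
	}
	// rest holds the unparsed remainder, starting at the second element.
	if rest == "" {
		t.Fatal("expected an unparsed remainder")
	}
}
```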
67  src/vendor/github.com/docker/distribution/registry/api/v2/urls.go  (generated, vendored)
@@ -1,8 +1,10 @@
 package v2
 
 import (
+	"net"
 	"net/http"
 	"net/url"
+	"strconv"
 	"strings"
 
 	"github.com/docker/distribution/reference"
@@ -49,10 +51,14 @@ func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
 	var scheme string
 
 	forwardedProto := r.Header.Get("X-Forwarded-Proto")
+	// TODO: log the error
+	forwardedHeader, _, _ := parseForwardedHeader(r.Header.Get("Forwarded"))
 
 	switch {
 	case len(forwardedProto) > 0:
 		scheme = forwardedProto
+	case len(forwardedHeader["proto"]) > 0:
+		scheme = forwardedHeader["proto"]
 	case r.TLS != nil:
 		scheme = "https"
 	case len(r.URL.Scheme) > 0:
@@ -62,14 +68,46 @@ func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
 	}
 
 	host := r.Host
-	forwardedHost := r.Header.Get("X-Forwarded-Host")
-	if len(forwardedHost) > 0 {
+
+	if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
 		// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
 		// comma-separated list of hosts, to which each proxy appends the
 		// requested host. We want to grab the first from this comma-separated
 		// list.
 		hosts := strings.SplitN(forwardedHost, ",", 2)
 		host = strings.TrimSpace(hosts[0])
+	} else if addr, exists := forwardedHeader["for"]; exists {
+		host = addr
+	} else if h, exists := forwardedHeader["host"]; exists {
+		host = h
+	}
+
+	portLessHost, port := host, ""
+	if !isIPv6Address(portLessHost) {
+		// with go 1.6, this would treat the last part of IPv6 address as a port
+		portLessHost, port, _ = net.SplitHostPort(host)
+	}
+	if forwardedPort := r.Header.Get("X-Forwarded-Port"); len(port) == 0 && len(forwardedPort) > 0 {
+		ports := strings.SplitN(forwardedPort, ",", 2)
+		forwardedPort = strings.TrimSpace(ports[0])
+		if _, err := strconv.ParseInt(forwardedPort, 10, 32); err == nil {
+			port = forwardedPort
+		}
+	}
+
+	if len(portLessHost) > 0 {
+		host = portLessHost
+	}
+	if len(port) > 0 {
+		// remove enclosing brackets of ipv6 address otherwise they will be duplicated
+		if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
+			host = host[1 : len(host)-1]
+		}
+		// JoinHostPort properly encloses ipv6 addresses in square brackets
+		host = net.JoinHostPort(host, port)
+	} else if isIPv6Address(host) && host[0] != '[' {
+		// ipv6 needs to be enclosed in square brackets in urls
+		host = "[" + host + "]"
 	}
 
 	basePath := routeDescriptorsMap[RouteNameBase].Path
@@ -249,3 +287,28 @@ func appendValues(u string, values ...url.Values) string {
 
 	return appendValuesURL(up, values...).String()
 }
+
+// isIPv6Address returns true if given string is a valid IPv6 address. No port is allowed. The address may be
+// enclosed in square brackets.
+func isIPv6Address(host string) bool {
+	if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
+		host = host[1 : len(host)-1]
+	}
+	// The IPv6 scoped addressing zone identifier starts after the last percent sign.
+	if i := strings.LastIndexByte(host, '%'); i > 0 {
+		host = host[:i]
+	}
+	ip := net.ParseIP(host)
+	if ip == nil {
+		return false
+	}
+	if ip.To16() == nil {
+		return false
+	}
+	if ip.To4() == nil {
+		return true
+	}
+	// dot can be present in ipv4-mapped address, it needs to come after a colon though
+	i := strings.IndexAny(host, ":.")
+	return i >= 0 && host[i] == ':'
+}
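A hedged sketch of what the new forwarded-header handling buys: when the registry sits behind a reverse proxy that sets X-Forwarded-Proto/-Host/-Port, the URL builder reassembles the externally visible base URL. The host names and port below are invented for illustration; the calls assume the exported API of the vendored v2 package at this revision:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"

	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	// A request as seen by the registry behind a reverse proxy; the
	// proxy-facing host differs from the externally visible one.
	req := &http.Request{
		Host: "registry.internal:5000",
		URL:  &url.URL{Path: "/v2/"},
		Header: http.Header{
			"X-Forwarded-Proto": []string{"https"},
			"X-Forwarded-Host":  []string{"registry.example.org"},
			"X-Forwarded-Port":  []string{"443"},
		},
	}

	ub := v2.NewURLBuilderFromRequest(req, false)
	base, err := ub.BuildBaseURL()
	if err != nil {
		panic(err)
	}
	fmt.Println(base) // https://registry.example.org:443/v2/
}
```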
38  src/vendor/github.com/docker/distribution/registry/auth/auth.go  (generated, vendored)
@@ -66,8 +66,9 @@ type UserInfo struct {
 
 // Resource describes a resource by type and name.
 type Resource struct {
 	Type string
-	Name string
+	Class string
+	Name string
 }
 
 // Access describes a specific action that is
@@ -135,6 +136,39 @@ func (uic userInfoContext) Value(key interface{}) interface{} {
 	return uic.Context.Value(key)
 }
 
+// WithResources returns a context with the authorized resources.
+func WithResources(ctx context.Context, resources []Resource) context.Context {
+	return resourceContext{
+		Context:   ctx,
+		resources: resources,
+	}
+}
+
+type resourceContext struct {
+	context.Context
+	resources []Resource
+}
+
+type resourceKey struct{}
+
+func (rc resourceContext) Value(key interface{}) interface{} {
+	if key == (resourceKey{}) {
+		return rc.resources
+	}
+
+	return rc.Context.Value(key)
+}
+
+// AuthorizedResources returns the list of resources which have
+// been authorized for this request.
+func AuthorizedResources(ctx context.Context) []Resource {
+	if resources, ok := ctx.Value(resourceKey{}).([]Resource); ok {
+		return resources
+	}
+
+	return nil
+}
+
 // InitFunc is the type of an AccessController factory function and is used
 // to register the constructor for different AccesController backends.
 type InitFunc func(options map[string]interface{}) (AccessController, error)
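A small sketch of the new context plumbing. The resource values are invented for illustration, and the snippet assumes the auth package's context type is interface-compatible with the standard library's context.Context (so a plain background context satisfies it):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/distribution/registry/auth"
)

func main() {
	// An access controller would attach the resources it authorized...
	ctx := auth.WithResources(context.Background(), []auth.Resource{
		{Type: "repository", Class: "image", Name: "library/ubuntu"},
	})

	// ...and downstream handlers can read them back.
	for _, r := range auth.AuthorizedResources(ctx) {
		fmt.Printf("%s(%s): %s\n", r.Type, r.Class, r.Name)
	}
}
```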
14  src/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go  (generated, vendored)
@@ -176,12 +176,14 @@ func newAccessController(options map[string]interface{}) (auth.AccessController,
 	var rootCerts []*x509.Certificate
 	pemBlock, rawCertBundle := pem.Decode(rawCertBundle)
 	for pemBlock != nil {
-		cert, err := x509.ParseCertificate(pemBlock.Bytes)
-		if err != nil {
-			return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err)
-		}
+		if pemBlock.Type == "CERTIFICATE" {
+			cert, err := x509.ParseCertificate(pemBlock.Bytes)
+			if err != nil {
+				return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err)
+			}
 
-		rootCerts = append(rootCerts, cert)
+			rootCerts = append(rootCerts, cert)
+		}
 
 		pemBlock, rawCertBundle = pem.Decode(rawCertBundle)
 	}
@@ -259,6 +261,8 @@ func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.
 		}
 	}
 
+	ctx = auth.WithResources(ctx, token.resources())
+
 	return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil
 }
 
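The new type guard matters when the token-auth root certificate bundle contains non-certificate PEM blocks. A standalone sketch of the same filtering idea, with a made-up bundle containing only a parameters block (not the vendored code itself):

```go
package main

import (
	"encoding/pem"
	"fmt"
)

func main() {
	// A hypothetical bundle that mixes a parameters block in with certificates.
	bundle := []byte("-----BEGIN EC PARAMETERS-----\nBgUrgQQAIg==\n-----END EC PARAMETERS-----\n")

	for block, rest := pem.Decode(bundle); block != nil; block, rest = pem.Decode(rest) {
		if block.Type != "CERTIFICATE" {
			// Previously such a block would have been handed to x509.ParseCertificate
			// and aborted startup; with the change above it is simply skipped.
			fmt.Println("skipping PEM block of type", block.Type)
			continue
		}
		fmt.Println("would parse certificate block")
	}
}
```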
53  src/vendor/github.com/docker/distribution/registry/auth/token/token.go  (generated, vendored)
@@ -20,6 +20,9 @@ const (
 	// TokenSeparator is the value which separates the header, claims, and
 	// signature in the compact serialization of a JSON Web Token.
 	TokenSeparator = "."
+	// Leeway is the Duration that will be added to NBF and EXP claim
+	// checks to account for clock skew as per https://tools.ietf.org/html/rfc7519#section-4.1.5
+	Leeway = 60 * time.Second
 )
 
 // Errors used by token parsing and verification.
@@ -31,6 +34,7 @@ var (
 // ResourceActions stores allowed actions on a named and typed resource.
 type ResourceActions struct {
 	Type    string   `json:"type"`
+	Class   string   `json:"class,omitempty"`
 	Name    string   `json:"name"`
 	Actions []string `json:"actions"`
 }
@@ -92,7 +96,7 @@ func NewToken(rawToken string) (*Token, error) {
 
 	defer func() {
 		if err != nil {
-			log.Errorf("error while unmarshalling raw token: %s", err)
+			log.Infof("error while unmarshalling raw token: %s", err)
 		}
 	}()
 
@@ -132,39 +136,47 @@ func NewToken(rawToken string) (*Token, error) {
 func (t *Token) Verify(verifyOpts VerifyOptions) error {
 	// Verify that the Issuer claim is a trusted authority.
 	if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) {
-		log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer)
+		log.Infof("token from untrusted issuer: %q", t.Claims.Issuer)
 		return ErrInvalidToken
 	}
 
 	// Verify that the Audience claim is allowed.
 	if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) {
-		log.Errorf("token intended for another audience: %q", t.Claims.Audience)
+		log.Infof("token intended for another audience: %q", t.Claims.Audience)
 		return ErrInvalidToken
 	}
 
 	// Verify that the token is currently usable and not expired.
-	currentUnixTime := time.Now().Unix()
-	if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) {
-		log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime)
+	currentTime := time.Now()
+
+	ExpWithLeeway := time.Unix(t.Claims.Expiration, 0).Add(Leeway)
+	if currentTime.After(ExpWithLeeway) {
+		log.Infof("token not to be used after %s - currently %s", ExpWithLeeway, currentTime)
+		return ErrInvalidToken
+	}
+
+	NotBeforeWithLeeway := time.Unix(t.Claims.NotBefore, 0).Add(-Leeway)
+	if currentTime.Before(NotBeforeWithLeeway) {
+		log.Infof("token not to be used before %s - currently %s", NotBeforeWithLeeway, currentTime)
 		return ErrInvalidToken
 	}
 
 	// Verify the token signature.
 	if len(t.Signature) == 0 {
-		log.Error("token has no signature")
+		log.Info("token has no signature")
 		return ErrInvalidToken
 	}
 
 	// Verify that the signing key is trusted.
 	signingKey, err := t.VerifySigningKey(verifyOpts)
 	if err != nil {
-		log.Error(err)
+		log.Info(err)
 		return ErrInvalidToken
 	}
 
 	// Finally, verify the signature of the token using the key which signed it.
 	if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
-		log.Errorf("unable to verify token signature: %s", err)
+		log.Infof("unable to verify token signature: %s", err)
 		return ErrInvalidToken
 	}
 
@@ -338,6 +350,29 @@ func (t *Token) accessSet() accessSet {
 	return accessSet
 }
 
+func (t *Token) resources() []auth.Resource {
+	if t.Claims == nil {
+		return nil
+	}
+
+	resourceSet := map[auth.Resource]struct{}{}
+	for _, resourceActions := range t.Claims.Access {
+		resource := auth.Resource{
+			Type:  resourceActions.Type,
+			Class: resourceActions.Class,
+			Name:  resourceActions.Name,
+		}
+		resourceSet[resource] = struct{}{}
+	}
+
+	resources := make([]auth.Resource, 0, len(resourceSet))
+	for resource := range resourceSet {
+		resources = append(resources, resource)
+	}
+
+	return resources
+}
+
 func (t *Token) compactRaw() string {
 	return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature))
 }
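A worked illustration of the new Leeway handling: a token whose exp lies up to 60 seconds in the past, or whose nbf lies up to 60 seconds in the future, is still accepted. This is a standalone sketch that mirrors the verification arithmetic above, not the vendored code itself:

```go
package main

import (
	"fmt"
	"time"
)

// leeway mirrors the Leeway constant added in the diff above.
const leeway = 60 * time.Second

// usable reproduces the relaxed NBF/EXP checks from Token.Verify.
func usable(now time.Time, notBefore, expiration int64) bool {
	expWithLeeway := time.Unix(expiration, 0).Add(leeway)
	if now.After(expWithLeeway) {
		return false
	}
	notBeforeWithLeeway := time.Unix(notBefore, 0).Add(-leeway)
	return !now.Before(notBeforeWithLeeway)
}

func main() {
	now := time.Now()

	// Expired 30s ago and not valid for another 30s by the raw claims,
	// yet both checks pass because each is relaxed by the 60s leeway.
	exp := now.Add(-30 * time.Second).Unix()
	nbf := now.Add(30 * time.Second).Unix()
	fmt.Println(usable(now, nbf, exp)) // true

	// 90s past expiry exceeds the leeway and is rejected.
	fmt.Println(usable(now, nbf, now.Add(-90*time.Second).Unix())) // false
}
```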
27  src/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go  (generated, vendored, new file)
@@ -0,0 +1,27 @@
+package challenge
+
+import (
+	"net/url"
+	"strings"
+)
+
+// FROM: https://golang.org/src/net/http/http.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+	"http":  "80",
+	"https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+	addr := url.Host
+	if !hasPort(addr) {
+		return addr + ":" + portMap[url.Scheme]
+	}
+	return addr
+}
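Since hasPort and canonicalAddr are unexported, the sketch below would have to sit inside the challenge package; it only shows the default-port normalization that makes challenge lookups line up. The example hosts are invented:

```go
package challenge

import (
	"fmt"
	"net/url"
)

// Illustrative sketch, not part of the vendored code.
func canonicalAddrSketch() {
	// No explicit port: the scheme's default port is appended.
	u, _ := url.Parse("https://registry.example.org/v2/")
	fmt.Println(canonicalAddr(u)) // registry.example.org:443

	// An explicit port is kept as-is.
	u2, _ := url.Parse("http://localhost:5000/v2/")
	fmt.Println(canonicalAddr(u2)) // localhost:5000
}
```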
@@ -1,10 +1,11 @@
-package auth
+package challenge
 
 import (
 	"fmt"
 	"net/http"
 	"net/url"
 	"strings"
+	"sync"
 )
 
 // Challenge carries information from a WWW-Authenticate response header.
@@ -17,12 +18,12 @@ type Challenge struct {
 	Parameters map[string]string
 }
 
-// ChallengeManager manages the challenges for endpoints.
+// Manager manages the challenges for endpoints.
 // The challenges are pulled out of HTTP responses. Only
 // responses which expect challenges should be added to
 // the manager, since a non-unauthorized request will be
 // viewed as not requiring challenges.
-type ChallengeManager interface {
+type Manager interface {
 	// GetChallenges returns the challenges for the given
 	// endpoint URL.
 	GetChallenges(endpoint url.URL) ([]Challenge, error)
@@ -36,36 +37,52 @@ type ChallengeManager interface {
 	AddResponse(resp *http.Response) error
 }
 
-// NewSimpleChallengeManager returns an instance of
-// ChallengeManger which only maps endpoints to challenges
+// NewSimpleManager returns an instance of
+// Manger which only maps endpoints to challenges
 // based on the responses which have been added the
 // manager. The simple manager will make no attempt to
 // perform requests on the endpoints or cache the responses
 // to a backend.
-func NewSimpleChallengeManager() ChallengeManager {
-	return simpleChallengeManager{}
+func NewSimpleManager() Manager {
+	return &simpleManager{
+		Challanges: make(map[string][]Challenge),
+	}
 }
 
-type simpleChallengeManager map[string][]Challenge
+type simpleManager struct {
+	sync.RWMutex
+	Challanges map[string][]Challenge
+}
 
-func (m simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+func normalizeURL(endpoint *url.URL) {
 	endpoint.Host = strings.ToLower(endpoint.Host)
+	endpoint.Host = canonicalAddr(endpoint)
+}
 
-	challenges := m[endpoint.String()]
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+	normalizeURL(&endpoint)
+
+	m.RLock()
+	defer m.RUnlock()
+	challenges := m.Challanges[endpoint.String()]
 	return challenges, nil
 }
 
-func (m simpleChallengeManager) AddResponse(resp *http.Response) error {
+func (m *simpleManager) AddResponse(resp *http.Response) error {
 	challenges := ResponseChallenges(resp)
 	if resp.Request == nil {
 		return fmt.Errorf("missing request reference")
 	}
 	urlCopy := url.URL{
 		Path:   resp.Request.URL.Path,
-		Host:   strings.ToLower(resp.Request.URL.Host),
+		Host:   resp.Request.URL.Host,
 		Scheme: resp.Request.URL.Scheme,
 	}
-	m[urlCopy.String()] = challenges
+	normalizeURL(&urlCopy)
+
+	m.Lock()
+	defer m.Unlock()
+	m.Challanges[urlCopy.String()] = challenges
 	return nil
 }
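A hedged usage sketch of the renamed manager. The registry and auth-server URLs and the WWW-Authenticate value are invented; the point is that AddResponse and GetChallenges now go through normalizeURL, so a lookup without an explicit default port still hits the stored entry:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth/challenge"
)

func main() {
	m := challenge.NewSimpleManager()

	// Pretend the registry answered a ping with 401 and a bearer challenge.
	pingURL, _ := url.Parse("https://registry.example.org/v2/")
	resp := &http.Response{
		StatusCode: http.StatusUnauthorized,
		Header:     http.Header{},
		Request:    &http.Request{URL: pingURL},
	}
	resp.Header.Add("WWW-Authenticate",
		`Bearer realm="https://auth.example.org/token",service="registry.example.org"`)

	if err := m.AddResponse(resp); err != nil {
		panic(err)
	}

	// The stored key is normalized to registry.example.org:443, and so is the lookup.
	challenges, _ := m.GetChallenges(*pingURL)
	for _, c := range challenges {
		fmt.Println(c.Scheme, c.Parameters["realm"])
	}
}
```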
41  src/vendor/github.com/docker/distribution/registry/client/auth/session.go  (generated, vendored)
@@ -12,6 +12,7 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/registry/client"
+	"github.com/docker/distribution/registry/client/auth/challenge"
 	"github.com/docker/distribution/registry/client/transport"
 )
 
@@ -58,7 +59,7 @@ type CredentialStore interface {
 // schemes. The handlers are tried in order, the higher priority authentication
 // methods should be first. The challengeMap holds a list of challenges for
 // a given root API endpoint (for example "https://registry-1.docker.io/v2/").
-func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier {
+func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier {
 	return &endpointAuthorizer{
 		challenges: manager,
 		handlers:   handlers,
@@ -66,21 +67,25 @@ func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler)
 }
 
 type endpointAuthorizer struct {
-	challenges ChallengeManager
+	challenges challenge.Manager
 	handlers   []AuthenticationHandler
 	transport  http.RoundTripper
 }
 
 func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
-	v2Root := strings.Index(req.URL.Path, "/v2/")
-	if v2Root == -1 {
+	pingPath := req.URL.Path
+	if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 {
+		pingPath = pingPath[:v2Root+4]
+	} else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 {
+		pingPath = pingPath[:v1Root] + "/v2/"
+	} else {
 		return nil
 	}
 
 	ping := url.URL{
 		Host:   req.URL.Host,
 		Scheme: req.URL.Scheme,
-		Path:   req.URL.Path[:v2Root+4],
+		Path:   pingPath,
 	}
 
 	challenges, err := ea.challenges.GetChallenges(ping)
@@ -90,11 +95,11 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
 
 	if len(challenges) > 0 {
 		for _, handler := range ea.handlers {
-			for _, challenge := range challenges {
-				if challenge.Scheme != handler.Scheme() {
+			for _, c := range challenges {
+				if c.Scheme != handler.Scheme() {
 					continue
 				}
-				if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil {
+				if err := handler.AuthorizeRequest(req, c.Parameters); err != nil {
 					return err
 				}
 			}
@@ -142,13 +147,31 @@ type Scope interface {
 // to a repository.
 type RepositoryScope struct {
 	Repository string
+	Class      string
 	Actions    []string
 }
 
 // String returns the string representation of the repository
 // using the scope grammar
 func (rs RepositoryScope) String() string {
-	return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ","))
+	repoType := "repository"
+	if rs.Class != "" {
+		repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
+	}
+	return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))
+}
+
+// RegistryScope represents a token scope for access
+// to resources in the registry.
+type RegistryScope struct {
+	Name    string
+	Actions []string
+}
+
+// String returns the string representation of the user
+// using the scope grammar
+func (rs RegistryScope) String() string {
+	return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ","))
 }
 
 // TokenHandlerOptions is used to configure a new token handler
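The new Class field and the RegistryScope type change what ends up in a token request's scope parameter. A hedged sketch, with placeholder repository names and actions:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	repo := auth.RepositoryScope{
		Repository: "library/ubuntu",
		Class:      "image",
		Actions:    []string{"pull", "push"},
	}
	fmt.Println(repo.String()) // repository(image):library/ubuntu:pull,push

	reg := auth.RegistryScope{
		Name:    "catalog",
		Actions: []string{"*"},
	}
	fmt.Println(reg.String()) // registry:catalog:*
}
```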
42  src/vendor/github.com/docker/distribution/registry/client/errors.go  (generated, vendored)
@@ -9,6 +9,7 @@ import (
 	"net/http"
 
 	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/client/auth/challenge"
 )
 
 // ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
@@ -82,21 +83,52 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
 	return errors
 }
 
+func makeErrorList(err error) []error {
+	if errL, ok := err.(errcode.Errors); ok {
+		return []error(errL)
+	}
+	return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+	return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
 // HandleErrorResponse returns error parsed from HTTP response for an
 // unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
 // UnexpectedHTTPStatusError returned for response code outside of expected
 // range.
 func HandleErrorResponse(resp *http.Response) error {
-	if resp.StatusCode == 401 {
+	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+		// Check for OAuth errors within the `WWW-Authenticate` header first
+		// See https://tools.ietf.org/html/rfc6750#section-3
+		for _, c := range challenge.ResponseChallenges(resp) {
+			if c.Scheme == "bearer" {
+				var err errcode.Error
+				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
+				switch c.Parameters["error"] {
+				case "invalid_token":
+					err.Code = errcode.ErrorCodeUnauthorized
+				case "insufficient_scope":
+					err.Code = errcode.ErrorCodeDenied
+				default:
+					continue
+				}
+				if description := c.Parameters["error_description"]; description != "" {
+					err.Message = description
+				} else {
+					err.Message = err.Code.Message()
+				}
+
+				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
+			}
+		}
 		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
-		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok {
+		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
 			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
 		}
 		return err
 	}
-	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
-		return parseHTTPErrorResponse(resp.StatusCode, resp.Body)
-	}
 	return &UnexpectedHTTPStatusError{Status: resp.Status}
 }
 
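A sketch of the new WWW-Authenticate handling. The response below is constructed by hand purely for illustration; a real one would come from an http.Client:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/docker/distribution/registry/client"
)

func main() {
	resp := &http.Response{
		Status:     "403 Forbidden",
		StatusCode: http.StatusForbidden,
		Header:     http.Header{},
		Body:       ioutil.NopCloser(bytes.NewReader(nil)),
	}
	resp.Header.Add("WWW-Authenticate",
		`Bearer realm="https://auth.example.org/token",error="insufficient_scope",error_description="requested access to the resource is denied"`)

	// The bearer challenge's error code is mapped to a DENIED error and merged
	// with whatever could be parsed out of the (here empty) body.
	fmt.Println(client.HandleErrorResponse(resp))
}
```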
96  src/vendor/github.com/docker/distribution/registry/client/repository.go  (generated, vendored)
@@ -10,6 +10,7 @@ import (
 	"net/http"
 	"net/url"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/docker/distribution"
@@ -213,28 +214,35 @@ func (t *tags) All(ctx context.Context) ([]string, error) {
 		return tags, err
 	}
 
-	resp, err := t.client.Get(u)
-	if err != nil {
-		return tags, err
-	}
-	defer resp.Body.Close()
-
-	if SuccessStatus(resp.StatusCode) {
-		b, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return tags, err
-		}
-
-		tagsResponse := struct {
-			Tags []string `json:"tags"`
-		}{}
-		if err := json.Unmarshal(b, &tagsResponse); err != nil {
-			return tags, err
-		}
-		tags = tagsResponse.Tags
-		return tags, nil
-	}
-	return tags, HandleErrorResponse(resp)
+	for {
+		resp, err := t.client.Get(u)
+		if err != nil {
+			return tags, err
+		}
+		defer resp.Body.Close()
+
+		if SuccessStatus(resp.StatusCode) {
+			b, err := ioutil.ReadAll(resp.Body)
+			if err != nil {
+				return tags, err
+			}
+
+			tagsResponse := struct {
+				Tags []string `json:"tags"`
+			}{}
+			if err := json.Unmarshal(b, &tagsResponse); err != nil {
+				return tags, err
+			}
+			tags = append(tags, tagsResponse.Tags...)
+			if link := resp.Header.Get("Link"); link != "" {
+				u = strings.Trim(strings.Split(link, ";")[0], "<>")
+			} else {
+				return tags, nil
+			}
+		} else {
+			return tags, HandleErrorResponse(resp)
+		}
+	}
 }
 
 func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
@@ -293,18 +301,20 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er
 		return distribution.Descriptor{}, err
 	}
 
-	req, err := http.NewRequest("HEAD", u, nil)
-	if err != nil {
-		return distribution.Descriptor{}, err
-	}
-
-	for _, t := range distribution.ManifestMediaTypes() {
-		req.Header.Add("Accept", t)
-	}
-
-	var attempts int
-	resp, err := t.client.Do(req)
-check:
+	newRequest := func(method string) (*http.Response, error) {
+		req, err := http.NewRequest(method, u, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, t := range distribution.ManifestMediaTypes() {
+			req.Header.Add("Accept", t)
+		}
+		resp, err := t.client.Do(req)
+		return resp, err
+	}
+
+	resp, err := newRequest("HEAD")
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}
@@ -313,23 +323,20 @@ check:
 	switch {
 	case resp.StatusCode >= 200 && resp.StatusCode < 400:
 		return descriptorFromResponse(resp)
-	case resp.StatusCode == http.StatusMethodNotAllowed:
-		req, err = http.NewRequest("GET", u, nil)
+	default:
+		// if the response is an error - there will be no body to decode.
+		// Issue a GET request:
+		//   - for data from a server that does not handle HEAD
+		//   - to get error details in case of a failure
+		resp, err = newRequest("GET")
 		if err != nil {
 			return distribution.Descriptor{}, err
 		}
+		defer resp.Body.Close()
 
-		for _, t := range distribution.ManifestMediaTypes() {
-			req.Header.Add("Accept", t)
+		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+			return descriptorFromResponse(resp)
 		}
-
-		resp, err = t.client.Do(req)
-		attempts++
-		if attempts > 1 {
-			return distribution.Descriptor{}, err
-		}
-		goto check
-	default:
 		return distribution.Descriptor{}, HandleErrorResponse(resp)
 	}
 }
@@ -672,15 +679,6 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
 	return writer.Commit(ctx, desc)
 }
 
-// createOptions is a collection of blob creation modifiers relevant to general
-// blob storage intended to be configured by the BlobCreateOption.Apply method.
-type createOptions struct {
-	Mount struct {
-		ShouldMount bool
-		From        reference.Canonical
-	}
-}
-
 type optionFunc func(interface{}) error
 
 func (f optionFunc) Apply(v interface{}) error {
@@ -691,7 +689,7 @@ func (f optionFunc) Apply(v interface{}) error {
 // mounted from the given canonical reference.
 func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
 	return optionFunc(func(v interface{}) error {
-		opts, ok := v.(*createOptions)
+		opts, ok := v.(*distribution.CreateOptions)
 		if !ok {
 			return fmt.Errorf("unexpected options type: %T", v)
 		}
@@ -704,7 +702,7 @@ func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
 }
 
 func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
-	var opts createOptions
+	var opts distribution.CreateOptions
 
 	for _, option := range options {
 		err := option.Apply(&opts)
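The pagination added to tags.All follows the Link header convention (RFC 5988). A tiny standalone sketch of just the header handling; the header value is invented:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// What a registry might send back alongside a partial tag listing.
	link := `</v2/library/ubuntu/tags/list?last=v100&n=100>; rel="next"`

	// Same extraction as in tags.All: take the URL part and strip the angle brackets.
	next := strings.Trim(strings.Split(link, ";")[0], "<>")
	fmt.Println(next) // /v2/library/ubuntu/tags/list?last=v100&n=100
}
```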
1  src/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go  (generated, vendored)
@@ -181,6 +181,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
 		// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
 	}
 
+	req.Header.Add("Accept-Encoding", "identity")
 	resp, err := hrs.client.Do(req)
 	if err != nil {
 		return nil, err
33  src/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go  (generated, vendored)
@@ -77,37 +77,46 @@ type repositoryScopedInMemoryBlobDescriptorCache struct {
 }
 
 func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-	if rsimbdcp.repository == nil {
+	rsimbdcp.parent.mu.Lock()
+	repo := rsimbdcp.repository
+	rsimbdcp.parent.mu.Unlock()
+
+	if repo == nil {
 		return distribution.Descriptor{}, distribution.ErrBlobUnknown
 	}
 
-	return rsimbdcp.repository.Stat(ctx, dgst)
+	return repo.Stat(ctx, dgst)
 }
 
 func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
-	if rsimbdcp.repository == nil {
+	rsimbdcp.parent.mu.Lock()
+	repo := rsimbdcp.repository
+	rsimbdcp.parent.mu.Unlock()
+
+	if repo == nil {
 		return distribution.ErrBlobUnknown
 	}
 
-	return rsimbdcp.repository.Clear(ctx, dgst)
+	return repo.Clear(ctx, dgst)
 }
 
 func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
-	if rsimbdcp.repository == nil {
+	rsimbdcp.parent.mu.Lock()
+	repo := rsimbdcp.repository
+	if repo == nil {
 		// allocate map since we are setting it now.
-		rsimbdcp.parent.mu.Lock()
 		var ok bool
 		// have to read back value since we may have allocated elsewhere.
-		rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
+		repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
 		if !ok {
-			rsimbdcp.repository = newMapBlobDescriptorCache()
-			rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository
+			repo = newMapBlobDescriptorCache()
+			rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
 		}
-		rsimbdcp.parent.mu.Unlock()
+		rsimbdcp.repository = repo
 	}
+	rsimbdcp.parent.mu.Unlock()
 
-	if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil {
+	if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
 		return err
 	}
 
136
src/vendor/vendor.json
vendored
136
src/vendor/vendor.json
vendored
@ -159,140 +159,112 @@
 			"versionExact": "v3.0.0"
 		},
 		{
-			"checksumSHA1": "dyW7eJt0inBkevS8lV0eb0Pm6MA=",
+			"checksumSHA1": "tz1lZdR0AlIRg6Aqov+ccO3k+Ko=",
 			"path": "github.com/docker/distribution",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "qyi/ywyMfrOzLHOFUmfptEgicyQ=",
+			"checksumSHA1": "0au+tD+jymXNssdb1JgcctY7PN4=",
 			"path": "github.com/docker/distribution/context",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
 			"checksumSHA1": "f1wARLDzsF/JoyN01yoxXEwFIp8=",
 			"path": "github.com/docker/distribution/digest",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "9ymKULEPvYfNX+PCqIZfq5CMP/c=",
+			"checksumSHA1": "oYy5Q1HBImMQvh9t96cmNzWar80=",
 			"path": "github.com/docker/distribution/manifest",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "XFMuuUrVW18yXJr0Br+RNh6lpf8=",
+			"checksumSHA1": "ruHCsJGc7fTKfZJKg+Q90F1iMfY=",
 			"path": "github.com/docker/distribution/manifest/schema1",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "ZuTHl2f1hNYKoBnQXFXxYtwXg6Y=",
+			"checksumSHA1": "rsnafP6fWFIUdA5FM13FPmIYblo=",
 			"path": "github.com/docker/distribution/manifest/schema2",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "PzXRTLmmqWXxmDqdIXLcRYBma18=",
+			"checksumSHA1": "Afja02fDYNayq+FvrfyAgPBtW6Y=",
 			"path": "github.com/docker/distribution/reference",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "gVQRg7cbsvj1rhXm3LrbaoaDKbA=",
+			"checksumSHA1": "ClxxEM8HAe3DrneFwpUoIgoW+XA=",
 			"path": "github.com/docker/distribution/registry/api/errcode",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "8eDeP6DuTNsskvENoAyICs/QyN0=",
+			"checksumSHA1": "AdqP2O9atmZXE2SUf28oONdslxI=",
 			"path": "github.com/docker/distribution/registry/api/v2",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "Ew29UsY4X4DuSd/DRz3MoMsocvs=",
+			"checksumSHA1": "j9kYvq02nJOTQmEH3wUw2Z/ybd8=",
 			"path": "github.com/docker/distribution/registry/auth",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "k/JJaY5xB9e/fMDg14wFQQbRYe0=",
+			"checksumSHA1": "jnz1fQsQzotasJdsfzst+lVpmlo=",
 			"path": "github.com/docker/distribution/registry/auth/token",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "kUKO43CFnGqE5o3jwfijuIk/XQQ=",
+			"checksumSHA1": "+BD1MapPtKWpc2NAMAVL9LzHErk=",
 			"path": "github.com/docker/distribution/registry/client",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "sENYoE05tfuTC38SKYC+lKCDI1s=",
+			"checksumSHA1": "g1oeZgaYxQTmd8bzYTNe4neymLY=",
 			"path": "github.com/docker/distribution/registry/client/auth",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "VDfI80dOZWOJU7+neHqv2uT5Th8=",
+			"checksumSHA1": "HT3SwoOQunakEwoxg6rAvy94aLs=",
+			"path": "github.com/docker/distribution/registry/client/auth/challenge",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "KjpG7FYMU5ugtc/fTfL1YqhdaV4=",
 			"path": "github.com/docker/distribution/registry/client/transport",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
 			"checksumSHA1": "OfCHyYvzswfb+mAswNnEJmiQSq4=",
 			"path": "github.com/docker/distribution/registry/storage/cache",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
-			"checksumSHA1": "Uly2iPDPUbNtilosM7ERs1ZrcAY=",
+			"checksumSHA1": "ruzWihEQ6o3c2MIl+5bAXijBMSg=",
 			"path": "github.com/docker/distribution/registry/storage/cache/memory",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
 			"checksumSHA1": "cNp7rNReJHvdSfrIetXS9RGsLSo=",
 			"path": "github.com/docker/distribution/uuid",
-			"revision": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc",
-			"revisionTime": "2016-08-06T00:21:48Z",
-			"version": "v2.5.1",
-			"versionExact": "v2.5.1"
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
 		},
 		{
 			"checksumSHA1": "fpCMwlzpltN7rBIJoaDMQaIbGMc=",
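The vendor.json hunk above pins every github.com/docker/distribution package to revision 325b0804fef3a66309d962357aac3c2ce3f4d329, adds the new registry/client/auth/challenge package, and drops the old `version`/`versionExact` fields. A throwaway check like the one below (assumed to be run from the repository root; it is not part of the Harbor build) can confirm that no subpackage was left on the old revision.

```go
// Illustrative consistency check for src/vendor/vendor.json: verify all
// docker/distribution packages share a single pinned revision after the bump.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

// vendorFile models just the fields of govendor's vendor.json that we need.
type vendorFile struct {
	Package []struct {
		Path     string `json:"path"`
		Revision string `json:"revision"`
	} `json:"package"`
}

func main() {
	raw, err := os.ReadFile("src/vendor/vendor.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, "read vendor.json:", err)
		os.Exit(1)
	}
	var vf vendorFile
	if err := json.Unmarshal(raw, &vf); err != nil {
		fmt.Fprintln(os.Stderr, "parse vendor.json:", err)
		os.Exit(1)
	}
	revisions := map[string]bool{}
	for _, p := range vf.Package {
		if strings.HasPrefix(p.Path, "github.com/docker/distribution") {
			revisions[p.Revision] = true
		}
	}
	if len(revisions) == 1 {
		fmt.Println("all docker/distribution packages share one revision")
	} else {
		fmt.Println("mixed revisions found:", revisions)
	}
}
```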
@@ -1,7 +1,7 @@
 version: '2'
 services:
   registry:
-    image: library/registry:2.5.1
+    image: library/registry:2.6.0
     restart: always
     volumes:
       - /data/registry:/storage
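Because the registry tag is pinned in more than one place (the Makefile pull/save targets and the compose template shown above), a bump like this is easy to leave half-done. The small scan below is a convenience sketch, not part of the project: run from the repository root, it flags any file that still mentions the old tag.

```go
// Illustrative helper: walk the working tree and report files that still
// reference the pre-bump registry image tag.
package main

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	old := []byte("registry:2.5.1")
	err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}
		data, readErr := os.ReadFile(path)
		if readErr != nil {
			return readErr
		}
		if bytes.Contains(data, old) {
			// Any hit here means a pin was missed during the upgrade.
			fmt.Println("stale reference in", path)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "walk failed:", err)
		os.Exit(1)
	}
}
```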